/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/arb.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/khelp.h>
#endif
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/qmath.h>
#include <sys/stats.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/refcount.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif

#include <netinet/tcp.h>
#ifdef INVARIANTS
#define TCPSTATES
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_ecn.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_lro.h>
#include <netinet/cc/cc.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_accounting.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>
#include <crypto/siphash/siphash.h>

#include <security/mac/mac_framework.h>

#ifdef INET6
static ip6proto_ctlinput_t tcp6_ctlinput;
static udp_tun_icmp_t tcp6_ctlinput_viaudp;
#endif

VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
#ifdef INET6
VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
#endif

#ifdef TCP_SAD_DETECTION
/* Sack attack detection thresholds and such */
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Sack Attack detection thresholds");
int32_t tcp_force_detection = 0;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection,
    CTLFLAG_RW,
    &tcp_force_detection, 0,
    "Do we force detection even if the INP has it off?");
int32_t tcp_sad_limit = 10000;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, limit,
    CTLFLAG_RW,
    &tcp_sad_limit, 10000,
    "If SaD is enabled, what is the limit to sendmap entries (0 = unlimited)?");
int32_t tcp_sack_to_ack_thresh = 700;	/* 70 % */
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh,
    CTLFLAG_RW,
    &tcp_sack_to_ack_thresh, 700,
    "Percentage of sacks to acks we must see above (10.1 percent is 101)");
int32_t tcp_sack_to_move_thresh = 600;	/* 60 % */
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh,
    CTLFLAG_RW,
    &tcp_sack_to_move_thresh, 600,
    "Percentage of sack moves we must see above (10.1 percent is 101)");
int32_t tcp_restoral_thresh = 450;	/* 45 % (sack:2:ack -25%) (mv:ratio -15%) */
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh,
    CTLFLAG_RW,
    &tcp_restoral_thresh, 450,
    "Percentage of sack to ack percentage we must see below to restore (10.1 percent is 101)");
int32_t tcp_sad_decay_val = 800;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per,
    CTLFLAG_RW,
    &tcp_sad_decay_val, 800,
    "The decay percentage (10.1 percent equals 101)");
int32_t tcp_map_minimum = 500;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps,
    CTLFLAG_RW,
    &tcp_map_minimum, 500,
    "Number of map entries before we start detection");
int32_t tcp_sad_pacing_interval = 2000;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int,
    CTLFLAG_RW,
    &tcp_sad_pacing_interval, 2000,
    "What is the minimum pacing interval for a classified attacker?");

int32_t tcp_sad_low_pps = 100;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps,
    CTLFLAG_RW,
    &tcp_sad_low_pps, 100,
    "What is the input pps below which we do not decay?");
#endif
uint32_t tcp_ack_war_time_window = 1000;
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_timewindow,
    CTLFLAG_RW,
    &tcp_ack_war_time_window, 1000,
    "If the TCP stack does ack-war prevention, how many milliseconds are in its time window?");
uint32_t tcp_ack_war_cnt = 5;
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_cnt,
    CTLFLAG_RW,
    &tcp_ack_war_cnt, 5,
    "If the TCP stack does ack-war prevention, how many ACKs can be sent in its time window?");
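
/*
 * Illustrative sketch only (not part of this file): a stack honoring the
 * two knobs above would bound its ACK responses to at most tcp_ack_war_cnt
 * segments per tcp_ack_war_time_window milliseconds.  Assuming hypothetical
 * per-connection fields t_ack_war_end and t_ack_war_sent, the check could
 * look like:
 *
 *	if (TSTMP_GT(tcp_ts_getticks(), tp->t_ack_war_end)) {
 *		tp->t_ack_war_end = tcp_ts_getticks() +
 *		    tcp_ack_war_time_window;
 *		tp->t_ack_war_sent = 0;
 *	}
 *	if (tp->t_ack_war_sent < tcp_ack_war_cnt) {
 *		tp->t_ack_war_sent++;
 *		(void)tcp_output(tp);	// send the (challenge) ACK
 *	}
 */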

struct rwlock tcp_function_lock;

static int
sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I",
    "Default TCP Maximum Segment Size");

#ifdef INET6
static int
sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_v6mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_v6mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
    "Default TCP Maximum Segment Size for IPv6");
#endif /* INET6 */

/*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one. The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending. Set to zero to disable MINMSS
 * checking. This setting prevents us from sending too small packets.
 */
VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_minmss), 0,
    "Minimum TCP Maximum Segment Size");

VNET_DEFINE(int, tcp_do_rfc1323) = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc1323), 0,
    "Enable rfc1323 (high performance TCP) extensions");

/*
 * As of June 2021, several TCP stacks violate RFC 7323 from September 2014.
 * Some stacks negotiate TS, but never send them after connection setup. Some
 * stacks negotiate TS, but don't send them when sending keep-alive segments.
 * These include modern widely deployed TCP stacks.
 * Therefore tolerating violations for now...
 */
VNET_DEFINE(int, tcp_tolerate_missing_ts) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tolerate_missing_ts, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_tolerate_missing_ts), 0,
    "Tolerate missing TCP timestamps");

VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ts_offset_per_conn), 0,
    "Initialize TCP timestamps per connection instead of per host pair");

/* How many connections are pacing */
static volatile uint32_t number_of_tcp_connections_pacing = 0;
static uint32_t shadow_num_connections = 0;
static counter_u64_t tcp_pacing_failures;

static int tcp_pacing_limit = 10000;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pacing_limit, CTLFLAG_RW,
    &tcp_pacing_limit, 1000,
    "If the TCP stack does pacing, is there a limit (-1 = no, 0 = no pacing, N = number of connections)");

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pacing_count, CTLFLAG_RD,
    &shadow_num_connections, 0, "Number of TCP connections being paced");

SYSCTL_COUNTER_U64(_net_inet_tcp, OID_AUTO, pacing_failures, CTLFLAG_RD,
    &tcp_pacing_failures, "Number of times we failed to enable pacing to avoid exceeding the limit");

static int tcp_log_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
    &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	0
#endif
static int tcp_tcbhashsize = TCBHASHSIZE;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");

VNET_DEFINE_STATIC(int, icmp_may_rst) = 1;
#define	V_icmp_may_rst		VNET(icmp_may_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(icmp_may_rst), 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0;
#define	V_tcp_isn_reseed_interval	VNET(tcp_isn_reseed_interval)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_isn_reseed_interval), 0,
    "Seconds between reseeding of ISN secret");

static int tcp_soreceive_stream;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
    &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");

VNET_DEFINE(uma_zone_t, sack_hole_zone);
#define	V_sack_hole_zone	VNET(sack_hole_zone)

VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0;	/* unlimited */
static int
sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = V_tcp_map_entries_limit;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		/* only allow "0" and values above the minimum */
		if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT)
			error = EINVAL;
		else
			V_tcp_map_entries_limit = new;
	}
	return (error);
}
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &VNET_NAME(tcp_map_entries_limit), 0,
    &sysctl_net_inet_tcp_map_limit_check, "IU",
    "Total sendmap entries limit");

VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0;	/* unlimited */
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_map_split_limit), 0,
    "Total sendmap split entries limit");

#ifdef TCP_HHOOK
VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
#endif

#define	TS_OFFSET_SECRET_LENGTH	SIPHASH_KEY_LENGTH
VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]);
#define	V_ts_offset_secret	VNET(ts_offset_secret)

static int	tcp_default_fb_init(struct tcpcb *tp, void **ptr);
static void	tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged);
static int	tcp_default_handoff_ok(struct tcpcb *tp);
static struct inpcb *tcp_notify(struct inpcb *, int);
static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
static struct inpcb *tcp_mtudisc(struct inpcb *, int);
static struct inpcb *tcp_drop_syn_sent(struct inpcb *, int);
static char *	tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
		    const void *ip4hdr, const void *ip6hdr);
static void	tcp_default_switch_failed(struct tcpcb *tp);
static ipproto_ctlinput_t	tcp_ctlinput;
static udp_tun_icmp_t		tcp_ctlinput_viaudp;

static struct tcp_function_block tcp_def_funcblk = {
	.tfb_tcp_block_name = "freebsd",
	.tfb_tcp_output = tcp_default_output,
	.tfb_tcp_do_segment = tcp_do_segment,
	.tfb_tcp_ctloutput = tcp_default_ctloutput,
	.tfb_tcp_handoff_ok = tcp_default_handoff_ok,
	.tfb_tcp_fb_init = tcp_default_fb_init,
	.tfb_tcp_fb_fini = tcp_default_fb_fini,
	.tfb_switch_failed = tcp_default_switch_failed,
};

static int tcp_fb_cnt = 0;
struct tcp_funchead t_functions;
VNET_DEFINE_STATIC(struct tcp_function_block *, tcp_func_set_ptr) =
    &tcp_def_funcblk;
#define	V_tcp_func_set_ptr	VNET(tcp_func_set_ptr)

void
tcp_record_dsack(struct tcpcb *tp, tcp_seq start, tcp_seq end, int tlp)
{
	TCPSTAT_INC(tcps_dsack_count);
	tp->t_dsack_pack++;
	if (tlp == 0) {
		if (SEQ_GT(end, start)) {
			tp->t_dsack_bytes += (end - start);
			TCPSTAT_ADD(tcps_dsack_bytes, (end - start));
		} else {
			tp->t_dsack_tlp_bytes += (start - end);
			TCPSTAT_ADD(tcps_dsack_bytes, (start - end));
		}
	} else {
		if (SEQ_GT(end, start)) {
			tp->t_dsack_bytes += (end - start);
			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (end - start));
		} else {
			tp->t_dsack_tlp_bytes += (start - end);
			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (start - end));
		}
	}
}

static struct tcp_function_block *
find_tcp_functions_locked(struct tcp_function_set *fs)
{
	struct tcp_function *f;
	struct tcp_function_block *blk = NULL;

	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (strcmp(f->tf_name, fs->function_set_name) == 0) {
			blk = f->tf_fb;
			break;
		}
	}
	return (blk);
}

static struct tcp_function_block *
find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
{
	struct tcp_function_block *rblk = NULL;
	struct tcp_function *f;

	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (f->tf_fb == blk) {
			rblk = blk;
			if (s) {
				*s = f;
			}
			break;
		}
	}
	return (rblk);
}
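
/*
 * Usage sketch (illustrative, not part of this file): callers of the
 * find_and_ref_*() helpers below own a reference on the block they get
 * back and must release it when done:
 *
 *	struct tcp_function_block *blk;
 *
 *	blk = find_and_ref_tcp_functions(&fs);
 *	if (blk != NULL) {
 *		... use blk ...
 *		refcount_release(&blk->tfb_refcnt);
 *	}
 */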

struct tcp_function_block *
find_and_ref_tcp_functions(struct tcp_function_set *fs)
{
	struct tcp_function_block *blk;

	rw_rlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(fs);
	if (blk)
		refcount_acquire(&blk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (blk);
}

struct tcp_function_block *
find_and_ref_tcp_fb(struct tcp_function_block *blk)
{
	struct tcp_function_block *rblk;

	rw_rlock(&tcp_function_lock);
	rblk = find_tcp_fb_locked(blk, NULL);
	if (rblk)
		refcount_acquire(&rblk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (rblk);
}

/* Find a matching alias for the given tcp_function_block. */
int
find_tcp_function_alias(struct tcp_function_block *blk,
    struct tcp_function_set *fs)
{
	struct tcp_function *f;
	int found;

	found = 0;
	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if ((f->tf_fb == blk) &&
		    (strncmp(f->tf_name, blk->tfb_tcp_block_name,
		    TCP_FUNCTION_NAME_LEN_MAX) != 0)) {
			/* Matching function block with different name. */
			strncpy(fs->function_set_name, f->tf_name,
			    TCP_FUNCTION_NAME_LEN_MAX);
			found = 1;
			break;
		}
	}
	/* Null terminate the string appropriately. */
	if (found) {
		fs->function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
	} else {
		fs->function_set_name[0] = '\0';
	}
	rw_runlock(&tcp_function_lock);
	return (found);
}

static struct tcp_function_block *
find_and_ref_tcp_default_fb(void)
{
	struct tcp_function_block *rblk;

	rw_rlock(&tcp_function_lock);
	rblk = V_tcp_func_set_ptr;
	refcount_acquire(&rblk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (rblk);
}

void
tcp_switch_back_to_default(struct tcpcb *tp)
{
	struct tcp_function_block *tfb;
	void *ptr = NULL;

	KASSERT(tp->t_fb != &tcp_def_funcblk,
	    ("%s: called by the built-in default stack", __func__));

	/*
	 * Now, we'll find a new function block to use.
	 * Start by trying the current user-selected
	 * default, unless this stack is the user-selected
	 * default.
	 */
	tfb = find_and_ref_tcp_default_fb();
	if (tfb == tp->t_fb) {
		refcount_release(&tfb->tfb_refcnt);
		tfb = NULL;
	}
	/* Does the stack accept this connection? */
	if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL &&
	    (*tfb->tfb_tcp_handoff_ok)(tp)) {
		refcount_release(&tfb->tfb_refcnt);
		tfb = NULL;
	}
	/* Try to use that stack. */
	if (tfb != NULL) {
		/* Initialize the new stack. If it succeeds, we are done. */
		if (tfb->tfb_tcp_fb_init == NULL ||
		    (*tfb->tfb_tcp_fb_init)(tp, &ptr) == 0) {
			/* Release the old stack */
			if (tp->t_fb->tfb_tcp_fb_fini != NULL)
				(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
			refcount_release(&tp->t_fb->tfb_refcnt);
			/* Now set in all the pointers */
			tp->t_fb = tfb;
			tp->t_fb_ptr = ptr;
			return;
		}
		/*
		 * Initialization failed. Release the reference count on
		 * the looked up default stack.
		 */
		refcount_release(&tfb->tfb_refcnt);
	}

	/*
	 * If that wasn't feasible, use the built-in default
	 * stack which is not allowed to reject anyone.
	 */
	tfb = find_and_ref_tcp_fb(&tcp_def_funcblk);
	if (tfb == NULL) {
		/* there always should be a default */
		panic("Can't refer to tcp_def_funcblk");
	}
	if (tfb->tfb_tcp_handoff_ok != NULL) {
		if ((*tfb->tfb_tcp_handoff_ok)(tp)) {
			/* The default stack cannot say no */
			panic("Default stack rejects a new session?");
		}
	}
	if (tfb->tfb_tcp_fb_init != NULL &&
	    (*tfb->tfb_tcp_fb_init)(tp, &ptr)) {
		/* The default stack cannot fail */
		panic("Default stack initialization failed");
	}
	/* Now release the old stack */
	if (tp->t_fb->tfb_tcp_fb_fini != NULL)
		(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
	refcount_release(&tp->t_fb->tfb_refcnt);
	/* And set in the pointers to the new */
	tp->t_fb = tfb;
	tp->t_fb_ptr = ptr;
}

static bool
tcp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa, void *ctx)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct udphdr *uh;
	struct tcphdr *th;
	int thlen;
	uint16_t port;

	TCPSTAT_INC(tcps_tunneled_pkts);
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		TCPSTAT_INC(tcps_tunneled_errs);
		goto out;
	}
	thlen = sizeof(struct tcphdr);
	if (m->m_len < off + sizeof(struct udphdr) + thlen &&
	    (m = m_pullup(m, off + sizeof(struct udphdr) + thlen)) == NULL) {
		TCPSTAT_INC(tcps_tunneled_errs);
		goto out;
	}
	iph = mtod(m, struct ip *);
	uh = (struct udphdr *)((caddr_t)iph + off);
	th = (struct tcphdr *)(uh + 1);
	thlen = th->th_off << 2;
	if (m->m_len < off + sizeof(struct udphdr) + thlen) {
		m = m_pullup(m, off + sizeof(struct udphdr) + thlen);
		if (m == NULL) {
			TCPSTAT_INC(tcps_tunneled_errs);
			goto out;
		} else {
			iph = mtod(m, struct ip *);
			uh = (struct udphdr *)((caddr_t)iph + off);
			th = (struct tcphdr *)(uh + 1);
		}
	}
	m->m_pkthdr.tcp_tun_port = port = uh->uh_sport;
	bcopy(th, uh, m->m_len - off);
	m->m_len -= sizeof(struct udphdr);
	m->m_pkthdr.len -= sizeof(struct udphdr);
	/*
	 * We use the same algorithm for
	 * both UDP and TCP for c-sum. So
	 * the code in tcp_input will skip
	 * the checksum. So we do nothing
	 * with the flag (m->m_pkthdr.csum_flags).
	 */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		tcp_input_with_port(&m, &off, IPPROTO_TCP, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		tcp6_input_with_port(&m, &off, IPPROTO_TCP, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return (true);
out:
	m_freem(m);

	return (true);
}

static int
sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
{
	int error = ENOENT;
	struct tcp_function_set fs;
	struct tcp_function_block *blk;

	memset(&fs, 0, sizeof(fs));
	rw_rlock(&tcp_function_lock);
	blk = find_tcp_fb_locked(V_tcp_func_set_ptr, NULL);
	if (blk) {
		/* Found him */
		strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
		fs.pcbcnt = blk->tfb_refcnt;
	}
	rw_runlock(&tcp_function_lock);
	error = sysctl_handle_string(oidp, fs.function_set_name,
	    sizeof(fs.function_set_name), req);

	/* Check for error or no change */
	if (error != 0 || req->newptr == NULL)
		return (error);

	rw_wlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(&fs);
	if ((blk == NULL) ||
	    (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
		error = ENOENT;
		goto done;
	}
	V_tcp_func_set_ptr = blk;
done:
	rw_wunlock(&tcp_function_lock);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
    CTLFLAG_VNET | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
    "Set/get the default TCP functions");
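
/*
 * Usage note (illustrative): the default stack applied to new connections
 * can be read or changed at runtime, e.g.:
 *
 *	sysctl net.inet.tcp.functions_default
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * The named stack must already be registered (e.g. by loading its module),
 * or the write fails with ENOENT.
 */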
static int
sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
{
	int error, cnt, linesz;
	struct tcp_function *f;
	char *buffer, *cp;
	size_t bufsz, outsz;
	bool alias;

	cnt = 0;
	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		cnt++;
	}
	rw_runlock(&tcp_function_lock);

	bufsz = (cnt + 2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
	buffer = malloc(bufsz, M_TEMP, M_WAITOK);

	error = 0;
	cp = buffer;

	linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
	    "Alias", "PCB count");
	cp += linesz;
	bufsz -= linesz;
	outsz = linesz;

	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
		linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
		    f->tf_fb->tfb_tcp_block_name,
		    (f->tf_fb == V_tcp_func_set_ptr) ? '*' : ' ',
		    alias ? f->tf_name : "-",
		    f->tf_fb->tfb_refcnt);
		if (linesz >= bufsz) {
			error = EOVERFLOW;
			break;
		}
		cp += linesz;
		bufsz -= linesz;
		outsz += linesz;
	}
	rw_runlock(&tcp_function_lock);
	if (error == 0)
		error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
    CTLFLAG_VNET | CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_net_inet_list_available, "A",
    "List available TCP Function sets");

VNET_DEFINE(int, tcp_udp_tunneling_port) = TCP_TUNNELING_PORT_DEFAULT;

#ifdef INET
VNET_DEFINE(struct socket *, udp4_tun_socket) = NULL;
#define	V_udp4_tun_socket	VNET(udp4_tun_socket)
#endif
#ifdef INET6
VNET_DEFINE(struct socket *, udp6_tun_socket) = NULL;
#define	V_udp6_tun_socket	VNET(udp6_tun_socket)
#endif

static struct sx tcpoudp_lock;

static void
tcp_over_udp_stop(void)
{

	sx_assert(&tcpoudp_lock, SA_XLOCKED);

#ifdef INET
	if (V_udp4_tun_socket != NULL) {
		soclose(V_udp4_tun_socket);
		V_udp4_tun_socket = NULL;
	}
#endif
#ifdef INET6
	if (V_udp6_tun_socket != NULL) {
		soclose(V_udp6_tun_socket);
		V_udp6_tun_socket = NULL;
	}
#endif
}

static int
tcp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif

	sx_assert(&tcpoudp_lock, SA_XLOCKED);

	port = V_tcp_udp_tunneling_port;
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (V_udp4_tun_socket != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (V_udp6_tun_socket != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &V_udp4_tun_socket,
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(V_udp4_tun_socket,
	    tcp_recv_udp_tunneled_packet,
	    tcp_ctlinput_viaudp,
	    NULL))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(V_udp4_tun_socket,
	    (struct sockaddr *)&sin, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &V_udp6_tun_socket,
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(V_udp6_tun_socket,
	    tcp_recv_udp_tunneled_packet,
	    tcp6_ctlinput_viaudp,
	    NULL))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(V_udp6_tun_socket,
	    (struct sockaddr *)&sin6, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

static int
sysctl_net_inet_tcp_udp_tunneling_port_check(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t old, new;

	old = V_tcp_udp_tunneling_port;
	new = old;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if ((error == 0) &&
	    (req->newptr != NULL)) {
		if ((new < TCP_TUNNELING_PORT_MIN) ||
		    (new > TCP_TUNNELING_PORT_MAX)) {
			error = EINVAL;
		} else {
			sx_xlock(&tcpoudp_lock);
			V_tcp_udp_tunneling_port = new;
			if (old != 0) {
				tcp_over_udp_stop();
			}
			if (new != 0) {
				error = tcp_over_udp_start();
				if (error != 0) {
					V_tcp_udp_tunneling_port = 0;
				}
			}
			sx_xunlock(&tcpoudp_lock);
		}
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_port,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(tcp_udp_tunneling_port),
    0, &sysctl_net_inet_tcp_udp_tunneling_port_check, "IU",
    "Tunneling port for TCP over UDP");

VNET_DEFINE(int, tcp_udp_tunneling_overhead) = TCP_TUNNELING_OVERHEAD_DEFAULT;

static int
sysctl_net_inet_tcp_udp_tunneling_overhead_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_udp_tunneling_overhead;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < TCP_TUNNELING_OVERHEAD_MIN) ||
		    (new > TCP_TUNNELING_OVERHEAD_MAX))
			error = EINVAL;
		else
			V_tcp_udp_tunneling_overhead = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_overhead,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(tcp_udp_tunneling_overhead),
    0, &sysctl_net_inet_tcp_udp_tunneling_overhead_check, "IU",
    "MSS reduction when using TCP over UDP");
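
/*
 * Usage note (illustrative): TCP-over-UDP encapsulation is enabled by
 * selecting a port in [TCP_TUNNELING_PORT_MIN, TCP_TUNNELING_PORT_MAX],
 * e.g.:
 *
 *	sysctl net.inet.tcp.udp_tunneling_port=<port>
 *
 * The handler above tears down any existing tunneling sockets and, for a
 * non-zero port, restarts the listener via tcp_over_udp_start().
 */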

/*
 * Exports one (struct tcp_function_info) for each alias/name.
 */
static int
sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
{
	int cnt, error;
	struct tcp_function *f;
	struct tcp_function_info tfi;

	/*
	 * We don't allow writes.
	 */
	if (req->newptr != NULL)
		return (EINVAL);

	/*
	 * Wire the old buffer so we can directly copy the functions to
	 * user space without dropping the lock.
	 */
	if (req->oldptr != NULL) {
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
	}

	/*
	 * Walk the list and copy out matching entries. If INVARIANTS
	 * is compiled in, also walk the list to verify the length of
	 * the list matches what we have recorded.
	 */
	rw_rlock(&tcp_function_lock);

	cnt = 0;
#ifndef INVARIANTS
	if (req->oldptr == NULL) {
		cnt = tcp_fb_cnt;
		goto skip_loop;
	}
#endif
	TAILQ_FOREACH(f, &t_functions, tf_next) {
#ifdef INVARIANTS
		cnt++;
#endif
		if (req->oldptr != NULL) {
			bzero(&tfi, sizeof(tfi));
			tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
			tfi.tfi_id = f->tf_fb->tfb_id;
			(void)strlcpy(tfi.tfi_alias, f->tf_name,
			    sizeof(tfi.tfi_alias));
			(void)strlcpy(tfi.tfi_name,
			    f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name));
			error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
			/*
			 * Don't stop on error, as that is the
			 * mechanism we use to accumulate length
			 * information if the buffer was too short.
			 */
		}
	}
	KASSERT(cnt == tcp_fb_cnt,
	    ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
#ifndef INVARIANTS
skip_loop:
#endif
	rw_runlock(&tcp_function_lock);
	if (req->oldptr == NULL)
		error = SYSCTL_OUT(req, NULL,
		    (cnt + 1) * sizeof(struct tcp_function_info));

	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
    CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
    "List TCP function block name-to-ID mappings");

/*
 * tfb_tcp_handoff_ok() function for the default stack.
 * Note that we'll basically try to take all comers.
 */
static int
tcp_default_handoff_ok(struct tcpcb *tp)
{

	return (0);
}

/*
 * tfb_tcp_fb_init() function for the default stack.
 *
 * This handles making sure we have appropriate timers set if you are
 * transitioning a socket that has some amount of setup done.
 *
 * The init() function from the default can *never* return non-zero,
 * i.e. it is required to always succeed since it is the stack of last
 * resort!
 */
static int
tcp_default_fb_init(struct tcpcb *tp, void **ptr)
{
	struct socket *so = tptosocket(tp);
	int rexmt;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	/* We don't use the pointer */
	*ptr = NULL;

	KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
	    ("%s: connection %p in unexpected state %d", __func__, tp,
	    tp->t_state));

	/* Make sure we get no interesting mbuf queuing behavior */
	/* All mbuf queue/ack compress flags should be off */
	tcp_lro_features_off(tp);

	/* Cancel the GP measurement in progress */
	tp->t_flags &= ~TF_GPUTINPROG;
	/* Validate the timers are not in usec; if they are, convert. */
	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	if (tp->t_rxtshift == 0)
		tp->t_rxtcur = rexmt;
	else
		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX);

	/*
	 * Nothing to do for ESTABLISHED or LISTEN states. And, we don't
	 * know what to do for unexpected states (which includes TIME_WAIT).
	 */
	if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT)
		return (0);

	/*
	 * Make sure some kind of transmission timer is set if there is
	 * outstanding data.
	 */
	if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
	    tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
	    tcp_timer_active(tp, TT_PERSIST))) {
		/*
		 * If the session has established and it looks like it should
		 * be in the persist state, set the persist timer. Otherwise,
		 * set the retransmit timer.
		 */
		if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 &&
		    (int32_t)(tp->snd_nxt - tp->snd_una) <
		    (int32_t)sbavail(&so->so_snd))
			tcp_setpersist(tp);
		else
			tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
	}

	/* All non-embryonic sessions get a keepalive timer. */
	if (!tcp_timer_active(tp, TT_KEEP))
		tcp_timer_activate(tp, TT_KEEP,
		    TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) :
		    TP_KEEPINIT(tp));

	/*
	 * Make sure critical variables are initialized
	 * if transitioning while in Recovery.
	 */
	if (IN_FASTRECOVERY(tp->t_flags)) {
		if (tp->sackhint.recover_fs == 0)
			tp->sackhint.recover_fs = max(1,
			    tp->snd_nxt - tp->snd_una);
	}

	return (0);
}

/*
 * tfb_tcp_fb_fini() function for the default stack.
 *
 * This changes state as necessary (or prudent) to prepare for another stack
 * to assume responsibility for the connection.
 */
static void
tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

#ifdef TCP_BLACKBOX
	tcp_log_flowend(tp);
#endif
	tp->t_acktime = 0;
	return;
}

MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");

static struct mtx isn_mtx;

#define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
#define	ISN_LOCK()	mtx_lock(&isn_mtx)
#define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)

INPCBSTORAGE_DEFINE(tcpcbstor, tcpcb, "tcpinp", "tcp_inpcb", "tcp", "tcphash");

/*
 * Take a value and get the next power of 2 that doesn't overflow.
 * Used to size the tcp_inpcb hash buckets.
 */
static int
maketcp_hashsize(int size)
{
	int hashsize;

	/*
	 * auto tune.
	 * get the next power of 2 higher than maxsockets.
	 */
	hashsize = 1 << fls(size);
	/* catch overflow, and just go one power of 2 smaller */
	if (hashsize < size) {
		hashsize = 1 << (fls(size) - 1);
	}
	return (hashsize);
}

static volatile int next_tcp_stack_id = 1;

/*
 * Register a TCP function block with the name provided in the names
 * array. (Note that this function does NOT automatically register
 * blk->tfb_tcp_block_name as a stack name. Therefore, you should
 * explicitly include blk->tfb_tcp_block_name in the list of names if
 * you wish to register the stack with that name.)
 *
 * Either all name registrations will succeed or all will fail. If
 * a name registration fails, the function will update the num_names
 * argument to point to the array index of the name that encountered
 * the failure.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
    const char *names[], int *num_names)
{
	struct tcp_function *n;
	struct tcp_function_set fs;
	int error, i;

	KASSERT(names != NULL && *num_names > 0,
	    ("%s: Called with 0-length name list", __func__));
	KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));
	KASSERT(rw_initialized(&tcp_function_lock),
	    ("%s: called too early", __func__));

	if ((blk->tfb_tcp_output == NULL) ||
	    (blk->tfb_tcp_do_segment == NULL) ||
	    (blk->tfb_tcp_ctloutput == NULL) ||
	    (strlen(blk->tfb_tcp_block_name) == 0)) {
		/*
		 * These functions are required and you
		 * need a name.
		 */
		*num_names = 0;
		return (EINVAL);
	}

	if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
		*num_names = 0;
		return (EINVAL);
	}

	refcount_init(&blk->tfb_refcnt, 0);
	blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
	for (i = 0; i < *num_names; i++) {
		n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
		if (n == NULL) {
			error = ENOMEM;
			goto cleanup;
		}
		n->tf_fb = blk;

		(void)strlcpy(fs.function_set_name, names[i],
		    sizeof(fs.function_set_name));
		rw_wlock(&tcp_function_lock);
		if (find_tcp_functions_locked(&fs) != NULL) {
			/* Duplicate name space not allowed */
			rw_wunlock(&tcp_function_lock);
			free(n, M_TCPFUNCTIONS);
			error = EALREADY;
			goto cleanup;
		}
		(void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name));
		TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
		tcp_fb_cnt++;
		rw_wunlock(&tcp_function_lock);
	}
	return (0);

cleanup:
	/*
	 * Deregister the names we just added. Because registration failed
	 * for names[i], we don't need to deregister that name.
	 */
	*num_names = i;
	rw_wlock(&tcp_function_lock);
	while (--i >= 0) {
		TAILQ_FOREACH(n, &t_functions, tf_next) {
			if (!strncmp(n->tf_name, names[i],
			    TCP_FUNCTION_NAME_LEN_MAX)) {
				TAILQ_REMOVE(&t_functions, n, tf_next);
				tcp_fb_cnt--;
				n->tf_fb = NULL;
				free(n, M_TCPFUNCTIONS);
				break;
			}
		}
	}
	rw_wunlock(&tcp_function_lock);
	return (error);
}

/*
 * Register a TCP function block using the name provided in the name
 * argument.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
    int wait)
{
	const char *name_list[1];
	int num_names, rv;

	num_names = 1;
	if (name != NULL)
		name_list[0] = name;
	else
		name_list[0] = blk->tfb_tcp_block_name;
	rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
	return (rv);
}

/*
 * Register a TCP function block using the name defined in
 * blk->tfb_tcp_block_name.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions(struct tcp_function_block *blk, int wait)
{

	return (register_tcp_functions_as_name(blk, NULL, wait));
}
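
/*
 * Example (sketch, not part of this file): a loadable stack module would
 * typically register itself at load time, e.g. from its MOD_LOAD handler,
 * and deregister at unload:
 *
 *	static const char *names[] = { "example", "example_alias" };
 *	int num_names = nitems(names);
 *	int error;
 *
 *	error = register_tcp_functions_as_names(&example_funcblk,
 *	    M_WAITOK, names, &num_names);
 *
 * "example_funcblk" is a hypothetical, fully populated
 * struct tcp_function_block. On unload, the module would call
 * deregister_tcp_functions(&example_funcblk, false, false), which
 * returns EBUSY while connections still reference the stack.
 */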

/*
 * Deregister all names associated with a function block. This
 * functionally removes the function block from use within the system.
 *
 * When called with a true quiesce argument, mark the function block
 * as being removed so no more stacks will use it and determine
 * whether the removal would succeed.
 *
 * When called with a false quiesce argument, actually attempt the
 * removal.
 *
 * When called with a force argument, attempt to switch all TCBs to
 * use the default stack instead of returning EBUSY.
 *
 * Returns 0 on success (or if the removal would succeed), or an error
 * code on failure.
 */
int
deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce,
    bool force)
{
	struct tcp_function *f;
	VNET_ITERATOR_DECL(vnet_iter);

	if (blk == &tcp_def_funcblk) {
		/* You can't un-register the default */
		return (EPERM);
	}
	rw_wlock(&tcp_function_lock);
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		if (blk == V_tcp_func_set_ptr) {
			/* You can't free the current default in some vnet. */
			CURVNET_RESTORE();
			VNET_LIST_RUNLOCK_NOSLEEP();
			rw_wunlock(&tcp_function_lock);
			return (EBUSY);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	/* Mark the block so no more stacks can use it. */
	blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
	/*
	 * If TCBs are still attached to the stack, attempt to switch them
	 * to the default stack.
	 */
	if (force && blk->tfb_refcnt) {
		struct inpcb *inp;
		struct tcpcb *tp;
		VNET_ITERATOR_DECL(vnet_iter);

		rw_wunlock(&tcp_function_lock);

		VNET_LIST_RLOCK();
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);
			struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
			    INPLOOKUP_WLOCKPCB);

			while ((inp = inp_next(&inpi)) != NULL) {
				tp = intotcpcb(inp);
				if (tp == NULL || tp->t_fb != blk)
					continue;
				tcp_switch_back_to_default(tp);
			}
			CURVNET_RESTORE();
		}
		VNET_LIST_RUNLOCK();

		rw_wlock(&tcp_function_lock);
	}
	if (blk->tfb_refcnt) {
		/* TCBs still attached. */
		rw_wunlock(&tcp_function_lock);
		return (EBUSY);
	}
	if (quiesce) {
		/* Skip removal. */
		rw_wunlock(&tcp_function_lock);
		return (0);
	}
	/* Remove any function names that map to this function block. */
	while (find_tcp_fb_locked(blk, &f) != NULL) {
		TAILQ_REMOVE(&t_functions, f, tf_next);
		tcp_fb_cnt--;
		f->tf_fb = NULL;
		free(f, M_TCPFUNCTIONS);
	}
	rw_wunlock(&tcp_function_lock);
	return (0);
}

static void
tcp_drain(void)
{
	struct epoch_tracker et;
	VNET_ITERATOR_DECL(vnet_iter);

	if (!do_tcpdrain)
		return;

	NET_EPOCH_ENTER(et);
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
		    INPLOOKUP_WLOCKPCB);
		struct inpcb *inpb;
		struct tcpcb *tcpb;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		while ((inpb = inp_next(&inpi)) != NULL) {
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				tcp_reass_flush(tcpb);
				tcp_clean_sackreport(tcpb);
#ifdef TCP_BLACKBOX
				tcp_log_drain(tcpb);
#endif
#ifdef TCPPCAP
				if (tcp_pcap_aggressive_free) {
					/* Free the TCP PCAP queues. */
					tcp_pcap_drain(&(tcpb->t_inpkts));
					tcp_pcap_drain(&(tcpb->t_outpkts));
				}
#endif
			}
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	NET_EPOCH_EXIT(et);
}

static void
tcp_vnet_init(void *arg __unused)
{

#ifdef TCP_HHOOK
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
	    &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
	    &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
#endif
#ifdef STATS
	if (tcp_stats_init())
		printf("%s: WARNING: unable to initialise TCP stats\n",
		    __func__);
#endif
	in_pcbinfo_init(&V_tcbinfo, &tcpcbstor, tcp_tcbhashsize,
	    tcp_tcbhashsize);

	syncache_init();
	tcp_hc_init();

	TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
	V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	tcp_fastopen_init();

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);

	V_tcp_msl = TCPTV_MSL;
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH,
    tcp_vnet_init, NULL);

static void
tcp_init(void *arg __unused)
{
	int hashsize;

	tcp_reass_global_init();

	/* XXX virtualize those below? */
	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_rexmit_initial = TCPTV_RTOBASE;
	if (tcp_rexmit_initial < 1)
		tcp_rexmit_initial = 1;
	tcp_rexmit_min = TCPTV_MIN;
	if (tcp_rexmit_min < 1)
		tcp_rexmit_min = 1;
	tcp_persmin = TCPTV_PERSMIN;
	tcp_persmax = TCPTV_PERSMAX;
	tcp_rexmit_slop = TCPTV_CPU_VAR;
	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;

	/* Setup the tcp function block list */
	TAILQ_INIT(&t_functions);
	rw_init(&tcp_function_lock, "tcp_func_lock");
	register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
	sx_init(&tcpoudp_lock, "TCP over UDP configuration");
#ifdef TCP_BLACKBOX
	/* Initialize the TCP logging data. */
	tcp_log_init();
#endif
	arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0);

	if (tcp_soreceive_stream) {
#ifdef INET
		tcp_protosw.pr_soreceive = soreceive_stream;
#endif
#ifdef INET6
		tcp6_protosw.pr_soreceive = soreceive_stream;
#endif /* INET6 */
	}

#ifdef INET6
	max_protohdr_grow(sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
#else /* INET6 */
	max_protohdr_grow(sizeof(struct tcpiphdr));
#endif /* INET6 */

	ISN_LOCK_INIT();
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(vm_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(mbuf_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);

	tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
	tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
	tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
	tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
	tcp_extra_mbuf = counter_u64_alloc(M_WAITOK);
	tcp_would_have_but = counter_u64_alloc(M_WAITOK);
	tcp_comp_total = counter_u64_alloc(M_WAITOK);
	tcp_uncomp_total = counter_u64_alloc(M_WAITOK);
	tcp_bad_csums = counter_u64_alloc(M_WAITOK);
	tcp_pacing_failures = counter_u64_alloc(M_WAITOK);
#ifdef TCPPCAP
	tcp_pcap_init();
#endif

	hashsize = tcp_tcbhashsize;
	if (hashsize == 0) {
		/*
		 * Auto tune the hash size based on maxsockets.
		 * A perfect hash would have a 1:1 mapping
		 * (hashsize = maxsockets) however it's been
		 * suggested that O(2) average is better.
		 */
		hashsize = maketcp_hashsize(maxsockets / 4);
		/*
		 * Our historical default is 512,
		 * do not autotune lower than this.
		 */
		if (hashsize < 512)
			hashsize = 512;
		if (bootverbose)
			printf("%s: %s auto tuned to %d\n", __func__,
			    "net.inet.tcp.tcbhashsize", hashsize);
	}
	/*
	 * We require a hashsize to be a power of two.
	 * Previously if it was not a power of two we would just reset it
	 * back to 512, which could be a nasty surprise if you did not notice
	 * the error message.
	 * Instead what we do is clip it to the closest power of two lower
	 * than the specified hash value.
	 */
	if (!powerof2(hashsize)) {
		int oldhashsize = hashsize;

		hashsize = maketcp_hashsize(hashsize);
		/* prevent absurdly low value */
		if (hashsize < 16)
			hashsize = 16;
		printf("%s: WARNING: TCB hash size not a power of 2, "
		    "clipped from %d to %d.\n", __func__, oldhashsize,
		    hashsize);
	}
	tcp_tcbhashsize = hashsize;

#ifdef INET
	IPPROTO_REGISTER(IPPROTO_TCP, tcp_input, tcp_ctlinput);
#endif
#ifdef INET6
	IP6PROTO_REGISTER(IPPROTO_TCP, tcp6_input, tcp6_ctlinput);
#endif
}
SYSINIT(tcp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, tcp_init, NULL);

#ifdef VIMAGE
static void
tcp_destroy(void *unused __unused)
{
	int n;
#ifdef TCP_HHOOK
	int error;
#endif

	/*
	 * All our processes are gone, all our sockets should be cleaned
	 * up, which means, we should be past the tcp_discardcb() calls.
	 * Sleep to let all tcpcb timers really disappear and cleanup.
	 */
	for (;;) {
		INP_INFO_WLOCK(&V_tcbinfo);
		n = V_tcbinfo.ipi_count;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		if (n == 0)
			break;
		pause("tcpdes", hz / 10);
	}
	tcp_hc_destroy();
	syncache_destroy();
	in_pcbinfo_destroy(&V_tcbinfo);
	/* tcp_discardcb() clears the sack_holes up. */
	uma_zdestroy(V_sack_hole_zone);

	/*
	 * Cannot free the zone until all tcpcbs are released as we attach
	 * the allocations to them.
	 */
	tcp_fastopen_destroy();

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);

#ifdef TCP_HHOOK
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
	}
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
	}
#endif
}
VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
#endif

void
tcp_fini(void *xtp)
{

}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(struct inpcb *inp, uint16_t port, void *ip_ptr, void *tcp_ptr)
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

	INP_WLOCK_ASSERT(inp);

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		if (port == 0)
			ip6->ip6_nxt = IPPROTO_TCP;
		else
			ip6->ip6_nxt = IPPROTO_UDP;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		if (port == 0)
			ip->ip_p = IPPROTO_TCP;
		else
			ip->ip_p = IPPROTO_UDP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
#endif /* INET */
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_off = 5;
	tcp_set_flags(th, 0);
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}
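
/*
 * Note (illustrative): a caller building headers for an encapsulated
 * connection passes the UDP tunneling port, e.g.
 *
 *	tcpip_fillheaders(inp, tp->t_port, ip_ptr, tcp_ptr);
 *
 * which selects IPPROTO_UDP as the protocol above; port == 0 yields the
 * plain TCP/IP header template.
 */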

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(struct inpcb *inp)
{
	struct tcptemp *t;

	t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
	if (t == NULL)
		return (NULL);
	tcpip_fillheaders(inp, 0, (void *)&t->tt_ipgen, (void *)&t->tt_t);
	return (t);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header. If m == NULL, then we make a copy
 * of the tcpiphdr at th and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection. If flags are given then we send
 * a message back to the TCP which originated the segment th,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then th must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
    tcp_seq ack, tcp_seq seq, uint16_t flags)
{
	struct tcpopt to;
	struct inpcb *inp;
	struct ip *ip;
	struct mbuf *optm;
	struct udphdr *uh = NULL;
	struct tcphdr *nth;
	struct tcp_log_buffer *lgb;
	u_char *optp;
#ifdef INET6
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int optlen, tlen, win, ulen;
	int ect = 0;
	bool incl_opts;
	uint16_t port;
	int output_ret;
#ifdef INVARIANTS
	int thflags = tcp_get_flags(th);
#endif

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
	NET_EPOCH_ASSERT();

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp != NULL) {
		inp = tptoinpcb(tp);
		INP_LOCK_ASSERT(inp);
	} else
		inp = NULL;

	if (m != NULL) {
#ifdef INET6
		if (isipv6 && ip6 && (ip6->ip6_nxt == IPPROTO_UDP))
			port = m->m_pkthdr.tcp_tun_port;
		else
#endif
		if (ip && (ip->ip_p == IPPROTO_UDP))
			port = m->m_pkthdr.tcp_tun_port;
		else
			port = 0;
	} else
		port = tp->t_port;

	incl_opts = false;
	win = 0;
	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > TCP_MAXWIN << tp->rcv_scale)
				win = TCP_MAXWIN << tp->rcv_scale;
		}
		if ((tp->t_flags & TF_NOOPT) == 0)
			incl_opts = true;
	}
	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL)
			return;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
			if (port) {
				/* Insert a UDP header */
				uh = (struct udphdr *)nth;
				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
				uh->uh_dport = port;
				nth = (struct tcphdr *)(uh + 1);
			}
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
			if (port) {
				/* Insert a UDP header */
				uh = (struct udphdr *)nth;
				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
				uh->uh_dport = port;
				nth = (struct tcphdr *)(uh + 1);
			}
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
1879 } else if ((!M_WRITABLE(m)) || (port != 0)) { 1880 struct mbuf *n; 1881 1882 /* Can't reuse 'm', allocate a new mbuf. */ 1883 n = m_gethdr(M_NOWAIT, MT_DATA); 1884 if (n == NULL) { 1885 m_freem(m); 1886 return; 1887 } 1888 1889 if (!m_dup_pkthdr(n, m, M_NOWAIT)) { 1890 m_freem(m); 1891 m_freem(n); 1892 return; 1893 } 1894 1895 n->m_data += max_linkhdr; 1896 /* m_len is set later */ 1897 #define xchg(a,b,type) { type t; t=a; a=b; b=t; } 1898 #ifdef INET6 1899 if (isipv6) { 1900 bcopy((caddr_t)ip6, mtod(n, caddr_t), 1901 sizeof(struct ip6_hdr)); 1902 ip6 = mtod(n, struct ip6_hdr *); 1903 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 1904 nth = (struct tcphdr *)(ip6 + 1); 1905 if (port) { 1906 /* Insert a UDP header */ 1907 uh = (struct udphdr *)nth; 1908 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1909 uh->uh_dport = port; 1910 nth = (struct tcphdr *)(uh + 1); 1911 } 1912 } else 1913 #endif /* INET6 */ 1914 { 1915 bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip)); 1916 ip = mtod(n, struct ip *); 1917 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t); 1918 nth = (struct tcphdr *)(ip + 1); 1919 if (port) { 1920 /* Insert a UDP header */ 1921 uh = (struct udphdr *)nth; 1922 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1923 uh->uh_dport = port; 1924 nth = (struct tcphdr *)(uh + 1); 1925 } 1926 } 1927 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr)); 1928 xchg(nth->th_dport, nth->th_sport, uint16_t); 1929 th = nth; 1930 m_freem(m); 1931 m = n; 1932 } else { 1933 /* 1934 * reuse the mbuf. 1935 * XXX MRT We inherit the FIB, which is lucky. 1936 */ 1937 m_freem(m->m_next); 1938 m->m_next = NULL; 1939 m->m_data = (caddr_t)ipgen; 1940 /* clear any receive flags for proper bpf timestamping */ 1941 m->m_flags &= ~(M_TSTMP | M_TSTMP_LRO); 1942 /* m_len is set later */ 1943 #ifdef INET6 1944 if (isipv6) { 1945 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 1946 nth = (struct tcphdr *)(ip6 + 1); 1947 } else 1948 #endif /* INET6 */ 1949 { 1950 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t); 1951 nth = (struct tcphdr *)(ip + 1); 1952 } 1953 if (th != nth) { 1954 /* 1955 * this is usually a case when an extension header 1956 * exists between the IPv6 header and the 1957 * TCP header. 1958 */ 1959 nth->th_sport = th->th_sport; 1960 nth->th_dport = th->th_dport; 1961 } 1962 xchg(nth->th_dport, nth->th_sport, uint16_t); 1963 #undef xchg 1964 } 1965 tlen = 0; 1966 #ifdef INET6 1967 if (isipv6) 1968 tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr); 1969 #endif 1970 #if defined(INET) && defined(INET6) 1971 else 1972 #endif 1973 #ifdef INET 1974 tlen = sizeof (struct tcpiphdr); 1975 #endif 1976 if (port) 1977 tlen += sizeof (struct udphdr); 1978 #ifdef INVARIANTS 1979 m->m_len = 0; 1980 KASSERT(M_TRAILINGSPACE(m) >= tlen, 1981 ("Not enough trailing space for message (m=%p, need=%d, have=%ld)", 1982 m, tlen, (long)M_TRAILINGSPACE(m))); 1983 #endif 1984 m->m_len = tlen; 1985 to.to_flags = 0; 1986 if (incl_opts) { 1987 ect = tcp_ecn_output_established(tp, &flags, 0, false); 1988 /* Make sure we have room. */ 1989 if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) { 1990 m->m_next = m_get(M_NOWAIT, MT_DATA); 1991 if (m->m_next) { 1992 optp = mtod(m->m_next, u_char *); 1993 optm = m->m_next; 1994 } else 1995 incl_opts = false; 1996 } else { 1997 optp = (u_char *) (nth + 1); 1998 optm = m; 1999 } 2000 } 2001 if (incl_opts) { 2002 /* Timestamps. 
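 * If the peer negotiated RFC 7323 timestamps, echo its most
 * recent value and stamp our own virtual clock. A sketch of the
 * usual 12-byte encoding (two NOPs plus kind 8, length 10):
 *
 *	NOP NOP | kind=8 len=10 | TSval = tcp_ts_getticks() + ts_offset
 *	                        | TSecr = ts_recent
 *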
*/ 2003 if (tp->t_flags & TF_RCVD_TSTMP) { 2004 to.to_tsval = tcp_ts_getticks() + tp->ts_offset; 2005 to.to_tsecr = tp->ts_recent; 2006 to.to_flags |= TOF_TS; 2007 } 2008 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 2009 /* TCP-MD5 (RFC2385). */ 2010 if (tp->t_flags & TF_SIGNATURE) 2011 to.to_flags |= TOF_SIGNATURE; 2012 #endif 2013 /* Add the options. */ 2014 tlen += optlen = tcp_addoptions(&to, optp); 2015 2016 /* Update m_len in the correct mbuf. */ 2017 optm->m_len += optlen; 2018 } else 2019 optlen = 0; 2020 #ifdef INET6 2021 if (isipv6) { 2022 if (uh) { 2023 ulen = tlen - sizeof(struct ip6_hdr); 2024 uh->uh_ulen = htons(ulen); 2025 } 2026 ip6->ip6_flow = htonl(ect << IPV6_FLOWLABEL_LEN); 2027 ip6->ip6_vfc = IPV6_VERSION; 2028 if (port) 2029 ip6->ip6_nxt = IPPROTO_UDP; 2030 else 2031 ip6->ip6_nxt = IPPROTO_TCP; 2032 ip6->ip6_plen = htons(tlen - sizeof(*ip6)); 2033 } 2034 #endif 2035 #if defined(INET) && defined(INET6) 2036 else 2037 #endif 2038 #ifdef INET 2039 { 2040 if (uh) { 2041 ulen = tlen - sizeof(struct ip); 2042 uh->uh_ulen = htons(ulen); 2043 } 2044 ip->ip_len = htons(tlen); 2045 if (inp != NULL) { 2046 ip->ip_tos = inp->inp_ip_tos & ~IPTOS_ECN_MASK; 2047 ip->ip_ttl = inp->inp_ip_ttl; 2048 } else { 2049 ip->ip_tos = 0; 2050 ip->ip_ttl = V_ip_defttl; 2051 } 2052 ip->ip_tos |= ect; 2053 if (port) { 2054 ip->ip_p = IPPROTO_UDP; 2055 } else { 2056 ip->ip_p = IPPROTO_TCP; 2057 } 2058 if (V_path_mtu_discovery) 2059 ip->ip_off |= htons(IP_DF); 2060 } 2061 #endif 2062 m->m_pkthdr.len = tlen; 2063 m->m_pkthdr.rcvif = NULL; 2064 #ifdef MAC 2065 if (inp != NULL) { 2066 /* 2067 * Packet is associated with a socket, so allow the 2068 * label of the response to reflect the socket label. 2069 */ 2070 INP_LOCK_ASSERT(inp); 2071 mac_inpcb_create_mbuf(inp, m); 2072 } else { 2073 /* 2074 * Packet is not associated with a socket, so possibly 2075 * update the label in place. 
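 * That is, let the MAC policy derive a label for an unsolicited
 * reply, typically a RST answering a segment that matched no
 * connection.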
2076 */ 2077 mac_netinet_tcp_reply(m); 2078 } 2079 #endif 2080 nth->th_seq = htonl(seq); 2081 nth->th_ack = htonl(ack); 2082 nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2; 2083 tcp_set_flags(nth, flags); 2084 if (tp && (flags & TH_RST)) { 2085 /* Log the reset */ 2086 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 2087 } 2088 if (tp != NULL) 2089 nth->th_win = htons((u_short) (win >> tp->rcv_scale)); 2090 else 2091 nth->th_win = htons((u_short)win); 2092 nth->th_urp = 0; 2093 2094 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 2095 if (to.to_flags & TOF_SIGNATURE) { 2096 if (!TCPMD5_ENABLED() || 2097 TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) { 2098 m_freem(m); 2099 return; 2100 } 2101 } 2102 #endif 2103 2104 #ifdef INET6 2105 if (isipv6) { 2106 if (port) { 2107 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 2108 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 2109 uh->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 2110 nth->th_sum = 0; 2111 } else { 2112 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 2113 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 2114 nth->th_sum = in6_cksum_pseudo(ip6, 2115 tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0); 2116 } 2117 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 2118 } 2119 #endif /* INET6 */ 2120 #if defined(INET6) && defined(INET) 2121 else 2122 #endif 2123 #ifdef INET 2124 { 2125 if (port) { 2126 uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 2127 htons(ulen + IPPROTO_UDP)); 2128 m->m_pkthdr.csum_flags = CSUM_UDP; 2129 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 2130 nth->th_sum = 0; 2131 } else { 2132 m->m_pkthdr.csum_flags = CSUM_TCP; 2133 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 2134 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 2135 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); 2136 } 2137 } 2138 #endif /* INET */ 2139 TCP_PROBE3(debug__output, tp, th, m); 2140 if (flags & TH_RST) 2141 TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth); 2142 lgb = NULL; 2143 if ((tp != NULL) && tcp_bblogging_on(tp)) { 2144 if (INP_WLOCKED(inp)) { 2145 union tcp_log_stackspecific log; 2146 struct timeval tv; 2147 2148 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2149 log.u_bbr.inhpts = tcp_in_hpts(tp); 2150 log.u_bbr.flex8 = 4; 2151 log.u_bbr.pkts_out = tp->t_maxseg; 2152 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2153 log.u_bbr.delivered = 0; 2154 lgb = tcp_log_event(tp, nth, NULL, NULL, TCP_LOG_OUT, 2155 ERRNO_UNK, 0, &log, false, NULL, NULL, 0, &tv); 2156 } else { 2157 /* 2158 * We can not log the packet, since we only own the 2159 * read lock, but a write lock is needed. The read lock 2160 * is not upgraded to a write lock, since only getting 2161 * the read lock was done intentionally to improve the 2162 * handling of SYN flooding attacks. 2163 * This happens only for pure SYN segments received in 2164 * the initial CLOSED state, or received in a more 2165 * advanced state than listen and the UDP encapsulation 2166 * port is unexpected. 2167 * The incoming SYN segments do not really belong to 2168 * the TCP connection and the handling does not change 2169 * the state of the TCP connection. Therefore, the 2170 * sending of the RST segments is not logged. Please 2171 * note that also the incoming SYN segments are not 2172 * logged. 2173 * 2174 * The following code ensures that the above description 2175 * is and stays correct. 
2176 */ 2177 KASSERT((thflags & (TH_ACK|TH_SYN)) == TH_SYN && 2178 (tp->t_state == TCPS_CLOSED || 2179 (tp->t_state > TCPS_LISTEN && tp->t_port != port)), 2180 ("%s: Logging of TCP segment with flags 0x%b and " 2181 "UDP encapsulation port %u skipped in state %s", 2182 __func__, thflags, PRINT_TH_FLAGS, 2183 ntohs(port), tcpstates[tp->t_state])); 2184 } 2185 } 2186 2187 if (flags & TH_ACK) 2188 TCPSTAT_INC(tcps_sndacks); 2189 else if (flags & (TH_SYN|TH_FIN|TH_RST)) 2190 TCPSTAT_INC(tcps_sndctrl); 2191 TCPSTAT_INC(tcps_sndtotal); 2192 2193 #ifdef INET6 2194 if (isipv6) { 2195 TCP_PROBE5(send, NULL, tp, ip6, tp, nth); 2196 output_ret = ip6_output(m, inp ? inp->in6p_outputopts : NULL, 2197 NULL, 0, NULL, NULL, inp); 2198 } 2199 #endif /* INET6 */ 2200 #if defined(INET) && defined(INET6) 2201 else 2202 #endif 2203 #ifdef INET 2204 { 2205 TCP_PROBE5(send, NULL, tp, ip, tp, nth); 2206 output_ret = ip_output(m, NULL, NULL, 0, NULL, inp); 2207 } 2208 #endif 2209 if (lgb != NULL) 2210 lgb->tlb_errno = output_ret; 2211 } 2212 2213 /* 2214 * Create a new TCP control block, making an empty reassembly queue and hooking 2215 * it to the argument protocol control block. The `inp' parameter must have 2216 * come from the zone allocator set up by the tcpcbstor declaration. 2217 */ 2218 struct tcpcb * 2219 tcp_newtcpcb(struct inpcb *inp) 2220 { 2221 struct tcpcb *tp = intotcpcb(inp); 2222 #ifdef INET6 2223 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 2224 #endif /* INET6 */ 2225 2226 /* 2227 * Historically allocation was done with M_ZERO. There is a lot of 2228 * code that relies on that. For now take the safe approach and zero 2229 * the whole tcpcb. This definitely can be optimized. 2230 */ 2231 bzero(&tp->t_start_zero, t_zero_size); 2232 2233 /* Initialise cc_var struct for this tcpcb. */ 2234 tp->t_ccv.type = IPPROTO_TCP; 2235 tp->t_ccv.ccvc.tcp = tp; 2236 rw_rlock(&tcp_function_lock); 2237 tp->t_fb = V_tcp_func_set_ptr; 2238 refcount_acquire(&tp->t_fb->tfb_refcnt); 2239 rw_runlock(&tcp_function_lock); 2240 /* 2241 * Use the current system default CC algorithm. 2242 */ 2243 cc_attach(tp, CC_DEFAULT_ALGO()); 2244 2245 if (CC_ALGO(tp)->cb_init != NULL) 2246 if (CC_ALGO(tp)->cb_init(&tp->t_ccv, NULL) > 0) { 2247 cc_detach(tp); 2248 if (tp->t_fb->tfb_tcp_fb_fini) 2249 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2250 refcount_release(&tp->t_fb->tfb_refcnt); 2251 return (NULL); 2252 } 2253 2254 #ifdef TCP_HHOOK 2255 if (khelp_init_osd(HELPER_CLASS_TCP, &tp->t_osd)) { 2256 if (tp->t_fb->tfb_tcp_fb_fini) 2257 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2258 refcount_release(&tp->t_fb->tfb_refcnt); 2259 return (NULL); 2260 } 2261 #endif 2262 2263 TAILQ_INIT(&tp->t_segq); 2264 STAILQ_INIT(&tp->t_inqueue); 2265 tp->t_maxseg = 2266 #ifdef INET6 2267 isipv6 ? V_tcp_v6mssdflt : 2268 #endif /* INET6 */ 2269 V_tcp_mssdflt; 2270 2271 /* All mbuf queue/ack compress flags should be off */ 2272 tcp_lro_features_off(tp); 2273 2274 callout_init_rw(&tp->t_callout, &inp->inp_lock, CALLOUT_RETURNUNLOCKED); 2275 for (int i = 0; i < TT_N; i++) 2276 tp->t_timers[i] = SBT_MAX; 2277 2278 switch (V_tcp_do_rfc1323) { 2279 case 0: 2280 break; 2281 default: 2282 case 1: 2283 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP); 2284 break; 2285 case 2: 2286 tp->t_flags = TF_REQ_SCALE; 2287 break; 2288 case 3: 2289 tp->t_flags = TF_REQ_TSTMP; 2290 break; 2291 } 2292 if (V_tcp_do_sack) 2293 tp->t_flags |= TF_SACK_PERMIT; 2294 TAILQ_INIT(&tp->snd_holes); 2295 2296 /* 2297 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no 2298 * rtt estimate.
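 * (TCPTV_SRTTBASE is deliberately 0, a value the estimator itself
 * never produces, so a zero srtt unambiguously means "unmeasured".)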
Set rttvar so that srtt + 4 * rttvar gives 2299 * reasonable initial retransmit time. 2300 */ 2301 tp->t_srtt = TCPTV_SRTTBASE; 2302 tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4; 2303 tp->t_rttmin = tcp_rexmit_min; 2304 tp->t_rxtcur = tcp_rexmit_initial; 2305 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 2306 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; 2307 tp->t_rcvtime = ticks; 2308 /* We always start with ticks granularity */ 2309 tp->t_tmr_granularity = TCP_TMR_GRANULARITY_TICKS; 2310 /* 2311 * IPv4 TTL initialization is necessary for an IPv6 socket as well, 2312 * because the socket may be bound to an IPv6 wildcard address, 2313 * which may match an IPv4-mapped IPv6 address. 2314 */ 2315 inp->inp_ip_ttl = V_ip_defttl; 2316 #ifdef TCPHPTS 2317 tcp_hpts_init(tp); 2318 #endif 2319 #ifdef TCPPCAP 2320 /* 2321 * Init the TCP PCAP queues. 2322 */ 2323 tcp_pcap_tcpcb_init(tp); 2324 #endif 2325 #ifdef TCP_BLACKBOX 2326 /* Initialize the per-TCPCB log data. */ 2327 tcp_log_tcpcbinit(tp); 2328 #endif 2329 tp->t_pacing_rate = -1; 2330 if (tp->t_fb->tfb_tcp_fb_init) { 2331 if ((*tp->t_fb->tfb_tcp_fb_init)(tp, &tp->t_fb_ptr)) { 2332 refcount_release(&tp->t_fb->tfb_refcnt); 2333 return (NULL); 2334 } 2335 } 2336 #ifdef STATS 2337 if (V_tcp_perconn_stats_enable == 1) 2338 tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0); 2339 #endif 2340 if (V_tcp_do_lrd) 2341 tp->t_flags |= TF_LRD; 2342 2343 return (tp); 2344 } 2345 2346 /* 2347 * Drop a TCP connection, reporting 2348 * the specified error. If connection is synchronized, 2349 * then send a RST to peer. 2350 */ 2351 struct tcpcb * 2352 tcp_drop(struct tcpcb *tp, int errno) 2353 { 2354 struct socket *so = tptosocket(tp); 2355 2356 NET_EPOCH_ASSERT(); 2357 INP_WLOCK_ASSERT(tptoinpcb(tp)); 2358 2359 if (TCPS_HAVERCVDSYN(tp->t_state)) { 2360 tcp_state_change(tp, TCPS_CLOSED); 2361 /* Don't use tcp_output() here due to possible recursion. */ 2362 (void)tcp_output_nodrop(tp); 2363 TCPSTAT_INC(tcps_drops); 2364 } else 2365 TCPSTAT_INC(tcps_conndrops); 2366 if (errno == ETIMEDOUT && tp->t_softerror) 2367 errno = tp->t_softerror; 2368 so->so_error = errno; 2369 return (tcp_close(tp)); 2370 } 2371 2372 void 2373 tcp_discardcb(struct tcpcb *tp) 2374 { 2375 struct inpcb *inp = tptoinpcb(tp); 2376 struct socket *so = tptosocket(tp); 2377 struct mbuf *m; 2378 #ifdef INET6 2379 bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 2380 #endif 2381 2382 INP_WLOCK_ASSERT(inp); 2383 2384 tcp_timer_stop(tp); 2385 if (tp->t_fb->tfb_tcp_timer_stop_all) { 2386 tp->t_fb->tfb_tcp_timer_stop_all(tp); 2387 } 2388 2389 /* free the reassembly queue, if any */ 2390 tcp_reass_flush(tp); 2391 2392 #ifdef TCP_OFFLOAD 2393 /* Disconnect offload device, if any. */ 2394 if (tp->t_flags & TF_TOE) 2395 tcp_offload_detach(tp); 2396 #endif 2397 2398 tcp_free_sackholes(tp); 2399 2400 #ifdef TCPPCAP 2401 /* Free the TCP PCAP queues. */ 2402 tcp_pcap_drain(&(tp->t_inpkts)); 2403 tcp_pcap_drain(&(tp->t_outpkts)); 2404 #endif 2405 2406 /* Allow the CC algorithm to clean up after itself. 
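 * A module that allocated private state in its cb_init() hook,
 * reachable via CC_DATA(tp), is expected to release it here.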
*/ 2407 if (CC_ALGO(tp)->cb_destroy != NULL) 2408 CC_ALGO(tp)->cb_destroy(&tp->t_ccv); 2409 CC_DATA(tp) = NULL; 2410 /* Detach from the CC algorithm */ 2411 cc_detach(tp); 2412 2413 #ifdef TCP_HHOOK 2414 khelp_destroy_osd(&tp->t_osd); 2415 #endif 2416 #ifdef STATS 2417 stats_blob_destroy(tp->t_stats); 2418 #endif 2419 2420 CC_ALGO(tp) = NULL; 2421 if ((m = STAILQ_FIRST(&tp->t_inqueue)) != NULL) { 2422 struct mbuf *prev; 2423 2424 STAILQ_INIT(&tp->t_inqueue); 2425 STAILQ_FOREACH_FROM_SAFE(m, &tp->t_inqueue, m_stailqpkt, prev) 2426 m_freem(m); 2427 } 2428 TCPSTATES_DEC(tp->t_state); 2429 2430 if (tp->t_fb->tfb_tcp_fb_fini) 2431 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2432 MPASS(!tcp_in_hpts(tp)); 2433 #ifdef TCP_BLACKBOX 2434 tcp_log_tcpcbfini(tp); 2435 #endif 2436 2437 /* 2438 * If we got enough samples through the srtt filter, 2439 * save the rtt and rttvar in the routing entry. 2440 * 'Enough' is arbitrarily defined as 4 rtt samples. 2441 * 4 samples is enough for the srtt filter to converge 2442 * to within enough % of the correct value; fewer samples 2443 * and we could save a bogus rtt. The danger is not high 2444 * as tcp quickly recovers from everything. 2445 * XXX: Works very well but needs some more statistics! 2446 * 2447 * XXXRRS: Updating must be after the stack fini() since 2448 * that may be converting some internal representation of 2449 * say srtt etc into the general one used by other stacks. 2450 * Lets also at least protect against the so being NULL 2451 * as RW stated below. 2452 */ 2453 if ((tp->t_rttupdated >= 4) && (so != NULL)) { 2454 struct hc_metrics_lite metrics; 2455 uint32_t ssthresh; 2456 2457 bzero(&metrics, sizeof(metrics)); 2458 /* 2459 * Update the ssthresh always when the conditions below 2460 * are satisfied. This gives us better new start value 2461 * for the congestion avoidance for new connections. 2462 * ssthresh is only set if packet loss occurred on a session. 2463 * 2464 * XXXRW: 'so' may be NULL here, and/or socket buffer may be 2465 * being torn down. Ideally this code would not use 'so'. 2466 */ 2467 ssthresh = tp->snd_ssthresh; 2468 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) { 2469 /* 2470 * convert the limit from user data bytes to 2471 * packets then to packet data bytes. 2472 */ 2473 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg; 2474 if (ssthresh < 2) 2475 ssthresh = 2; 2476 ssthresh *= (tp->t_maxseg + 2477 #ifdef INET6 2478 (isipv6 ? sizeof (struct ip6_hdr) + 2479 sizeof (struct tcphdr) : 2480 #endif 2481 sizeof (struct tcpiphdr) 2482 #ifdef INET6 2483 ) 2484 #endif 2485 ); 2486 } else 2487 ssthresh = 0; 2488 metrics.rmx_ssthresh = ssthresh; 2489 2490 metrics.rmx_rtt = tp->t_srtt; 2491 metrics.rmx_rttvar = tp->t_rttvar; 2492 metrics.rmx_cwnd = tp->snd_cwnd; 2493 metrics.rmx_sendpipe = 0; 2494 metrics.rmx_recvpipe = 0; 2495 2496 tcp_hc_update(&inp->inp_inc, &metrics); 2497 } 2498 2499 refcount_release(&tp->t_fb->tfb_refcnt); 2500 } 2501 2502 /* 2503 * Attempt to close a TCP control block, marking it as dropped, and freeing 2504 * the socket if we hold the only reference. 
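 * Returns NULL if the socket reference was ours and has been
 * released (the caller must not touch tp afterwards), otherwise
 * returns tp with the inpcb still write-locked.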
2505 */ 2506 struct tcpcb * 2507 tcp_close(struct tcpcb *tp) 2508 { 2509 struct inpcb *inp = tptoinpcb(tp); 2510 struct socket *so = tptosocket(tp); 2511 2512 INP_WLOCK_ASSERT(inp); 2513 2514 #ifdef TCP_OFFLOAD 2515 if (tp->t_state == TCPS_LISTEN) 2516 tcp_offload_listen_stop(tp); 2517 #endif 2518 /* 2519 * This releases the TFO pending counter resource for TFO listen 2520 * sockets as well as passively-created TFO sockets that transition 2521 * from SYN_RECEIVED to CLOSED. 2522 */ 2523 if (tp->t_tfo_pending) { 2524 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2525 tp->t_tfo_pending = NULL; 2526 } 2527 #ifdef TCPHPTS 2528 tcp_hpts_remove(tp); 2529 #endif 2530 in_pcbdrop(inp); 2531 TCPSTAT_INC(tcps_closed); 2532 if (tp->t_state != TCPS_CLOSED) 2533 tcp_state_change(tp, TCPS_CLOSED); 2534 KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL")); 2535 soisdisconnected(so); 2536 if (inp->inp_flags & INP_SOCKREF) { 2537 inp->inp_flags &= ~INP_SOCKREF; 2538 INP_WUNLOCK(inp); 2539 sorele(so); 2540 return (NULL); 2541 } 2542 return (tp); 2543 } 2544 2545 /* 2546 * Notify a tcp user of an asynchronous error; 2547 * store the error as a soft error, but do not wake up the user, 2548 * since there is currently no mechanism for reporting soft 2549 * errors to userland (a kqueue filter may be added some day). 2550 * Until then the error is only recorded in t_softerror and 2551 * surfaced by a later system call on the socket. 2552 */ 2553 static struct inpcb * 2554 tcp_notify(struct inpcb *inp, int error) 2555 { 2556 struct tcpcb *tp; 2557 2558 INP_WLOCK_ASSERT(inp); 2559 2560 tp = intotcpcb(inp); 2561 KASSERT(tp != NULL, ("tcp_notify: tp == NULL")); 2562 2563 /* 2564 * Ignore some errors if we are hooked up. 2565 * If connection hasn't completed, has retransmitted several times, 2566 * and receives a second error, give up now. This is better 2567 * than waiting a long time to establish a connection that 2568 * can never complete.
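 * In short:
 *	ESTABLISHED + transient unreachable error -> flush the
 *	    cached route so the next send re-resolves, else ignore;
 *	pre-ESTABLISHED + repeated errors after several
 *	    retransmits -> drop the connection;
 *	anything else -> just record t_softerror.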
2569 */ 2570 if (tp->t_state == TCPS_ESTABLISHED && 2571 (error == EHOSTUNREACH || error == ENETUNREACH || 2572 error == EHOSTDOWN)) { 2573 if (inp->inp_route.ro_nh) { 2574 NH_FREE(inp->inp_route.ro_nh); 2575 inp->inp_route.ro_nh = (struct nhop_object *)NULL; 2576 } 2577 return (inp); 2578 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 && 2579 tp->t_softerror) { 2580 tp = tcp_drop(tp, error); 2581 if (tp != NULL) 2582 return (inp); 2583 else 2584 return (NULL); 2585 } else { 2586 tp->t_softerror = error; 2587 return (inp); 2588 } 2589 #if 0 2590 wakeup( &so->so_timeo); 2591 sorwakeup(so); 2592 sowwakeup(so); 2593 #endif 2594 } 2595 2596 static int 2597 tcp_pcblist(SYSCTL_HANDLER_ARGS) 2598 { 2599 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo, 2600 INPLOOKUP_RLOCKPCB); 2601 struct xinpgen xig; 2602 struct inpcb *inp; 2603 int error; 2604 2605 if (req->newptr != NULL) 2606 return (EPERM); 2607 2608 if (req->oldptr == NULL) { 2609 int n; 2610 2611 n = V_tcbinfo.ipi_count + 2612 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2613 n += imax(n / 8, 10); 2614 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb); 2615 return (0); 2616 } 2617 2618 if ((error = sysctl_wire_old_buffer(req, 0)) != 0) 2619 return (error); 2620 2621 bzero(&xig, sizeof(xig)); 2622 xig.xig_len = sizeof xig; 2623 xig.xig_count = V_tcbinfo.ipi_count + 2624 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2625 xig.xig_gen = V_tcbinfo.ipi_gencnt; 2626 xig.xig_sogen = so_gencnt; 2627 error = SYSCTL_OUT(req, &xig, sizeof xig); 2628 if (error) 2629 return (error); 2630 2631 error = syncache_pcblist(req); 2632 if (error) 2633 return (error); 2634 2635 while ((inp = inp_next(&inpi)) != NULL) { 2636 if (inp->inp_gencnt <= xig.xig_gen && 2637 cr_canseeinpcb(req->td->td_ucred, inp) == 0) { 2638 struct xtcpcb xt; 2639 2640 tcp_inptoxtp(inp, &xt); 2641 error = SYSCTL_OUT(req, &xt, sizeof xt); 2642 if (error) { 2643 INP_RUNLOCK(inp); 2644 break; 2645 } else 2646 continue; 2647 } 2648 } 2649 2650 if (!error) { 2651 /* 2652 * Give the user an updated idea of our state. 2653 * If the generation differs from what we told 2654 * her before, she knows that something happened 2655 * while we were processing this request, and it 2656 * might be necessary to retry. 
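 * A consumer of the net.inet.tcp.pcblist sysctl can therefore
 * compare this trailing xinpgen against the one at the head of
 * the buffer and reissue the request on a mismatch.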
2657 */ 2658 xig.xig_gen = V_tcbinfo.ipi_gencnt; 2659 xig.xig_sogen = so_gencnt; 2660 xig.xig_count = V_tcbinfo.ipi_count + 2661 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2662 error = SYSCTL_OUT(req, &xig, sizeof xig); 2663 } 2664 2665 return (error); 2666 } 2667 2668 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, 2669 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2670 NULL, 0, tcp_pcblist, "S,xtcpcb", 2671 "List of active TCP connections"); 2672 2673 #ifdef INET 2674 static int 2675 tcp_getcred(SYSCTL_HANDLER_ARGS) 2676 { 2677 struct xucred xuc; 2678 struct sockaddr_in addrs[2]; 2679 struct epoch_tracker et; 2680 struct inpcb *inp; 2681 int error; 2682 2683 error = priv_check(req->td, PRIV_NETINET_GETCRED); 2684 if (error) 2685 return (error); 2686 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 2687 if (error) 2688 return (error); 2689 NET_EPOCH_ENTER(et); 2690 inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port, 2691 addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL); 2692 NET_EPOCH_EXIT(et); 2693 if (inp != NULL) { 2694 if (error == 0) 2695 error = cr_canseeinpcb(req->td->td_ucred, inp); 2696 if (error == 0) 2697 cru2x(inp->inp_cred, &xuc); 2698 INP_RUNLOCK(inp); 2699 } else 2700 error = ENOENT; 2701 if (error == 0) 2702 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 2703 return (error); 2704 } 2705 2706 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, 2707 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT, 2708 0, 0, tcp_getcred, "S,xucred", 2709 "Get the xucred of a TCP connection"); 2710 #endif /* INET */ 2711 2712 #ifdef INET6 2713 static int 2714 tcp6_getcred(SYSCTL_HANDLER_ARGS) 2715 { 2716 struct epoch_tracker et; 2717 struct xucred xuc; 2718 struct sockaddr_in6 addrs[2]; 2719 struct inpcb *inp; 2720 int error; 2721 #ifdef INET 2722 int mapped = 0; 2723 #endif 2724 2725 error = priv_check(req->td, PRIV_NETINET_GETCRED); 2726 if (error) 2727 return (error); 2728 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 2729 if (error) 2730 return (error); 2731 if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 || 2732 (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) { 2733 return (error); 2734 } 2735 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) { 2736 #ifdef INET 2737 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr)) 2738 mapped = 1; 2739 else 2740 #endif 2741 return (EINVAL); 2742 } 2743 2744 NET_EPOCH_ENTER(et); 2745 #ifdef INET 2746 if (mapped == 1) 2747 inp = in_pcblookup(&V_tcbinfo, 2748 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12], 2749 addrs[1].sin6_port, 2750 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12], 2751 addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL); 2752 else 2753 #endif 2754 inp = in6_pcblookup(&V_tcbinfo, 2755 &addrs[1].sin6_addr, addrs[1].sin6_port, 2756 &addrs[0].sin6_addr, addrs[0].sin6_port, 2757 INPLOOKUP_RLOCKPCB, NULL); 2758 NET_EPOCH_EXIT(et); 2759 if (inp != NULL) { 2760 if (error == 0) 2761 error = cr_canseeinpcb(req->td->td_ucred, inp); 2762 if (error == 0) 2763 cru2x(inp->inp_cred, &xuc); 2764 INP_RUNLOCK(inp); 2765 } else 2766 error = ENOENT; 2767 if (error == 0) 2768 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 2769 return (error); 2770 } 2771 2772 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, 2773 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT, 2774 0, 0, tcp6_getcred, "S,xucred", 2775 "Get the xucred of a TCP6 connection"); 2776 #endif /* INET6 */ 2777 2778 #ifdef INET 2779 /* Path MTU to try next when a fragmentation-needed message 
is received. */ 2780 static inline int 2781 tcp_next_pmtu(const struct icmp *icp, const struct ip *ip) 2782 { 2783 int mtu = ntohs(icp->icmp_nextmtu); 2784 2785 /* If no alternative MTU was proposed, try the next smaller one. */ 2786 if (!mtu) 2787 mtu = ip_next_mtu(ntohs(ip->ip_len), 1); 2788 if (mtu < V_tcp_minmss + sizeof(struct tcpiphdr)) 2789 mtu = V_tcp_minmss + sizeof(struct tcpiphdr); 2790 2791 return (mtu); 2792 } 2793 2794 static void 2795 tcp_ctlinput_with_port(struct icmp *icp, uint16_t port) 2796 { 2797 struct ip *ip; 2798 struct tcphdr *th; 2799 struct inpcb *inp; 2800 struct tcpcb *tp; 2801 struct inpcb *(*notify)(struct inpcb *, int); 2802 struct in_conninfo inc; 2803 tcp_seq icmp_tcp_seq; 2804 int errno, mtu; 2805 2806 errno = icmp_errmap(icp); 2807 switch (errno) { 2808 case 0: 2809 return; 2810 case EMSGSIZE: 2811 notify = tcp_mtudisc_notify; 2812 break; 2813 case ECONNREFUSED: 2814 if (V_icmp_may_rst) 2815 notify = tcp_drop_syn_sent; 2816 else 2817 notify = tcp_notify; 2818 break; 2819 case EHOSTUNREACH: 2820 if (V_icmp_may_rst && icp->icmp_type == ICMP_TIMXCEED) 2821 notify = tcp_drop_syn_sent; 2822 else 2823 notify = tcp_notify; 2824 break; 2825 default: 2826 notify = tcp_notify; 2827 } 2828 2829 ip = &icp->icmp_ip; 2830 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2831 icmp_tcp_seq = th->th_seq; 2832 inp = in_pcblookup(&V_tcbinfo, ip->ip_dst, th->th_dport, ip->ip_src, 2833 th->th_sport, INPLOOKUP_WLOCKPCB, NULL); 2834 if (inp != NULL) { 2835 tp = intotcpcb(inp); 2836 #ifdef TCP_OFFLOAD 2837 if (tp->t_flags & TF_TOE && errno == EMSGSIZE) { 2838 /* 2839 * MTU discovery for offloaded connections. Let 2840 * the TOE driver verify seq# and process it. 2841 */ 2842 mtu = tcp_next_pmtu(icp, ip); 2843 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu); 2844 goto out; 2845 } 2846 #endif 2847 if (tp->t_port != port) 2848 goto out; 2849 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) && 2850 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) { 2851 if (errno == EMSGSIZE) { 2852 /* 2853 * MTU discovery: we got a needfrag and 2854 * will potentially try a lower MTU. 2855 */ 2856 mtu = tcp_next_pmtu(icp, ip); 2857 2858 /* 2859 * Only process the offered MTU if it 2860 * is smaller than the current one. 
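 * A larger value would be useless at best and a forged ICMP at
 * worst. The comparison below adds the IP and TCP header sizes
 * because t_maxseg counts TCP payload only.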
2861 */ 2862 if (mtu < tp->t_maxseg + 2863 sizeof(struct tcpiphdr)) { 2864 bzero(&inc, sizeof(inc)); 2865 inc.inc_faddr = ip->ip_dst; 2866 inc.inc_fibnum = 2867 inp->inp_inc.inc_fibnum; 2868 tcp_hc_updatemtu(&inc, mtu); 2869 inp = tcp_mtudisc(inp, mtu); 2870 } 2871 } else 2872 inp = (*notify)(inp, errno); 2873 } 2874 } else { 2875 bzero(&inc, sizeof(inc)); 2876 inc.inc_fport = th->th_dport; 2877 inc.inc_lport = th->th_sport; 2878 inc.inc_faddr = ip->ip_dst; 2879 inc.inc_laddr = ip->ip_src; 2880 syncache_unreach(&inc, icmp_tcp_seq, port); 2881 } 2882 out: 2883 if (inp != NULL) 2884 INP_WUNLOCK(inp); 2885 } 2886 2887 static void 2888 tcp_ctlinput(struct icmp *icmp) 2889 { 2890 tcp_ctlinput_with_port(icmp, htons(0)); 2891 } 2892 2893 static void 2894 tcp_ctlinput_viaudp(udp_tun_icmp_param_t param) 2895 { 2896 /* It's a tunneled TCP-over-UDP ICMP message. */ 2897 struct icmp *icmp = param.icmp; 2898 struct ip *outer_ip, *inner_ip; 2899 struct udphdr *udp; 2900 struct tcphdr *th, ttemp; 2901 int i_hlen, o_len; 2902 uint16_t port; 2903 2904 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 2905 inner_ip = &icmp->icmp_ip; 2906 i_hlen = inner_ip->ip_hl << 2; 2907 o_len = ntohs(outer_ip->ip_len); 2908 if (o_len < 2909 (sizeof(struct ip) + 8 + i_hlen + sizeof(struct udphdr) + offsetof(struct tcphdr, th_ack))) { 2910 /* Not enough data present */ 2911 return; 2912 } 2913 /* Strip out the inner UDP header by copying the TCP header up on top of it. */ 2914 udp = (struct udphdr *)(((caddr_t)inner_ip) + i_hlen); 2915 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) { 2916 return; 2917 } 2918 port = udp->uh_dport; 2919 th = (struct tcphdr *)(udp + 1); 2920 memcpy(&ttemp, th, sizeof(struct tcphdr)); 2921 memcpy(udp, &ttemp, sizeof(struct tcphdr)); 2922 /* Now adjust down the size of the outer IP header */ 2923 o_len -= sizeof(struct udphdr); 2924 outer_ip->ip_len = htons(o_len); 2925 /* Now call in to the normal handling code */ 2926 tcp_ctlinput_with_port(icmp, port); 2927 } 2928 #endif /* INET */ 2929 2930 #ifdef INET6 2931 static inline int 2932 tcp6_next_pmtu(const struct icmp6_hdr *icmp6) 2933 { 2934 int mtu = ntohl(icmp6->icmp6_mtu); 2935 2936 /* 2937 * If no alternative MTU was proposed, or the proposed MTU was too 2938 * small, set to the min. 2939 */ 2940 if (mtu < IPV6_MMTU) 2941 mtu = IPV6_MMTU - 8; /* XXXNP: what is the adjustment for?
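 * Perhaps headroom for an 8-byte IPv6 fragment header; the
 * original reason was never recorded.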
*/ 2942 return (mtu); 2943 } 2944 2945 static void 2946 tcp6_ctlinput_with_port(struct ip6ctlparam *ip6cp, uint16_t port) 2947 { 2948 struct in6_addr *dst; 2949 struct inpcb *(*notify)(struct inpcb *, int); 2950 struct ip6_hdr *ip6; 2951 struct mbuf *m; 2952 struct inpcb *inp; 2953 struct tcpcb *tp; 2954 struct icmp6_hdr *icmp6; 2955 struct in_conninfo inc; 2956 struct tcp_ports { 2957 uint16_t th_sport; 2958 uint16_t th_dport; 2959 } t_ports; 2960 tcp_seq icmp_tcp_seq; 2961 unsigned int mtu; 2962 unsigned int off; 2963 int errno; 2964 2965 icmp6 = ip6cp->ip6c_icmp6; 2966 m = ip6cp->ip6c_m; 2967 ip6 = ip6cp->ip6c_ip6; 2968 off = ip6cp->ip6c_off; 2969 dst = &ip6cp->ip6c_finaldst->sin6_addr; 2970 2971 errno = icmp6_errmap(icmp6); 2972 switch (errno) { 2973 case 0: 2974 return; 2975 case EMSGSIZE: 2976 notify = tcp_mtudisc_notify; 2977 break; 2978 case ECONNREFUSED: 2979 if (V_icmp_may_rst) 2980 notify = tcp_drop_syn_sent; 2981 else 2982 notify = tcp_notify; 2983 break; 2984 case EHOSTUNREACH: 2985 /* 2986 * There are only four ICMPs that may reset connection: 2987 * - administratively prohibited 2988 * - port unreachable 2989 * - time exceeded in transit 2990 * - unknown next header 2991 */ 2992 if (V_icmp_may_rst && 2993 ((icmp6->icmp6_type == ICMP6_DST_UNREACH && 2994 (icmp6->icmp6_code == ICMP6_DST_UNREACH_ADMIN || 2995 icmp6->icmp6_code == ICMP6_DST_UNREACH_NOPORT)) || 2996 (icmp6->icmp6_type == ICMP6_TIME_EXCEEDED && 2997 icmp6->icmp6_code == ICMP6_TIME_EXCEED_TRANSIT) || 2998 (icmp6->icmp6_type == ICMP6_PARAM_PROB && 2999 icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER))) 3000 notify = tcp_drop_syn_sent; 3001 else 3002 notify = tcp_notify; 3003 break; 3004 default: 3005 notify = tcp_notify; 3006 } 3007 3008 /* Check if we can safely get the ports from the tcp hdr */ 3009 if (m == NULL || 3010 (m->m_pkthdr.len < 3011 (int32_t) (off + sizeof(struct tcp_ports)))) { 3012 return; 3013 } 3014 bzero(&t_ports, sizeof(struct tcp_ports)); 3015 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports); 3016 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport, 3017 &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL); 3018 off += sizeof(struct tcp_ports); 3019 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) { 3020 goto out; 3021 } 3022 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq); 3023 if (inp != NULL) { 3024 tp = intotcpcb(inp); 3025 #ifdef TCP_OFFLOAD 3026 if (tp->t_flags & TF_TOE && errno == EMSGSIZE) { 3027 /* MTU discovery for offloaded connections. */ 3028 mtu = tcp6_next_pmtu(icmp6); 3029 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu); 3030 goto out; 3031 } 3032 #endif 3033 if (tp->t_port != port) 3034 goto out; 3035 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) && 3036 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) { 3037 if (errno == EMSGSIZE) { 3038 /* 3039 * MTU discovery: 3040 * If we got a needfrag set the MTU 3041 * in the route to the suggested new 3042 * value (if given) and then notify. 3043 */ 3044 mtu = tcp6_next_pmtu(icmp6); 3045 3046 bzero(&inc, sizeof(inc)); 3047 inc.inc_fibnum = M_GETFIB(m); 3048 inc.inc_flags |= INC_ISIPV6; 3049 inc.inc6_faddr = *dst; 3050 if (in6_setscope(&inc.inc6_faddr, 3051 m->m_pkthdr.rcvif, NULL)) 3052 goto out; 3053 /* 3054 * Only process the offered MTU if it 3055 * is smaller than the current one. 
3056 */ 3057 if (mtu < tp->t_maxseg + 3058 sizeof (struct tcphdr) + 3059 sizeof (struct ip6_hdr)) { 3060 tcp_hc_updatemtu(&inc, mtu); 3061 tcp_mtudisc(inp, mtu); 3062 ICMP6STAT_INC(icp6s_pmtuchg); 3063 } 3064 } else 3065 inp = (*notify)(inp, errno); 3066 } 3067 } else { 3068 bzero(&inc, sizeof(inc)); 3069 inc.inc_fibnum = M_GETFIB(m); 3070 inc.inc_flags |= INC_ISIPV6; 3071 inc.inc_fport = t_ports.th_dport; 3072 inc.inc_lport = t_ports.th_sport; 3073 inc.inc6_faddr = *dst; 3074 inc.inc6_laddr = ip6->ip6_src; 3075 syncache_unreach(&inc, icmp_tcp_seq, port); 3076 } 3077 out: 3078 if (inp != NULL) 3079 INP_WUNLOCK(inp); 3080 } 3081 3082 static void 3083 tcp6_ctlinput(struct ip6ctlparam *ctl) 3084 { 3085 tcp6_ctlinput_with_port(ctl, htons(0)); 3086 } 3087 3088 static void 3089 tcp6_ctlinput_viaudp(udp_tun_icmp_param_t param) 3090 { 3091 struct ip6ctlparam *ip6cp = param.ip6cp; 3092 struct mbuf *m; 3093 struct udphdr *udp; 3094 uint16_t port; 3095 3096 m = m_pulldown(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(struct udphdr), NULL); 3097 if (m == NULL) { 3098 return; 3099 } 3100 udp = mtod(m, struct udphdr *); 3101 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) { 3102 return; 3103 } 3104 port = udp->uh_dport; 3105 m_adj(m, sizeof(struct udphdr)); 3106 if ((m->m_flags & M_PKTHDR) == 0) { 3107 ip6cp->ip6c_m->m_pkthdr.len -= sizeof(struct udphdr); 3108 } 3109 /* Now call in to the normal handling code */ 3110 tcp6_ctlinput_with_port(ip6cp, port); 3111 } 3112 3113 #endif /* INET6 */ 3114 3115 static uint32_t 3116 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len) 3117 { 3118 SIPHASH_CTX ctx; 3119 uint32_t hash[2]; 3120 3121 KASSERT(len >= SIPHASH_KEY_LENGTH, 3122 ("%s: keylen %u too short ", __func__, len)); 3123 SipHash24_Init(&ctx); 3124 SipHash_SetKey(&ctx, (uint8_t *)key); 3125 SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t)); 3126 SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t)); 3127 switch (inc->inc_flags & INC_ISIPV6) { 3128 #ifdef INET 3129 case 0: 3130 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr)); 3131 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr)); 3132 break; 3133 #endif 3134 #ifdef INET6 3135 case INC_ISIPV6: 3136 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr)); 3137 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr)); 3138 break; 3139 #endif 3140 } 3141 SipHash_Final((uint8_t *)hash, &ctx); 3142 3143 return (hash[0] ^ hash[1]); 3144 } 3145 3146 uint32_t 3147 tcp_new_ts_offset(struct in_conninfo *inc) 3148 { 3149 struct in_conninfo inc_store, *local_inc; 3150 3151 if (!V_tcp_ts_offset_per_conn) { 3152 memcpy(&inc_store, inc, sizeof(struct in_conninfo)); 3153 inc_store.inc_lport = 0; 3154 inc_store.inc_fport = 0; 3155 local_inc = &inc_store; 3156 } else { 3157 local_inc = inc; 3158 } 3159 return (tcp_keyed_hash(local_inc, V_ts_offset_secret, 3160 sizeof(V_ts_offset_secret))); 3161 } 3162 3163 /* 3164 * Following is where TCP initial sequence number generation occurs. 3165 * 3166 * There are two places where we must use initial sequence numbers: 3167 * 1. In SYN-ACK packets. 3168 * 2. In SYN packets. 3169 * 3170 * All ISNs for SYN-ACK packets are generated by the syncache. See 3171 * tcp_syncache.c for details. 3172 * 3173 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling 3174 * depends on this property. In addition, these ISNs should be 3175 * unguessable so as to prevent connection hijacking. 
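 * (An attacker who can predict the ISN of a connection can
 * blindly inject data into it, or reset it, without seeing any
 * of its traffic.)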
To satisfy 3176 * the requirements of this situation, the algorithm outlined in 3177 * RFC 1948 is used, with only small modifications. 3178 * 3179 * Implementation details: 3180 * 3181 * Time is based off the system timer, and is corrected so that it 3182 * increases by one megabyte per second. This allows for proper 3183 * recycling on high speed LANs while still leaving over an hour 3184 * before rollover. 3185 * 3186 * As reading the *exact* system time is too expensive to be done 3187 * whenever setting up a TCP connection, we increment the time 3188 * offset in two ways. First, a small random positive increment 3189 * is added to isn_offset for each connection that is set up. 3190 * Second, the function tcp_isn_tick fires once per clock tick 3191 * and increments isn_offset as necessary so that sequence numbers 3192 * are incremented at approximately ISN_BYTES_PER_SECOND. The 3193 * random positive increments serve only to ensure that the same 3194 * exact sequence number is never sent out twice (as could otherwise 3195 * happen when a port is recycled in less than the system tick 3196 * interval.) 3197 * 3198 * net.inet.tcp.isn_reseed_interval controls the number of seconds 3199 * between seeding of isn_secret. This is normally set to zero, 3200 * as reseeding should not be necessary. 3201 * 3202 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset, 3203 * isn_offset_old, and isn_ctx is performed using the ISN lock. In 3204 * general, this means holding an exclusive (write) lock. 3205 */ 3206 3207 #define ISN_BYTES_PER_SECOND 1048576 3208 #define ISN_STATIC_INCREMENT 4096 3209 #define ISN_RANDOM_INCREMENT (4096 - 1) 3210 #define ISN_SECRET_LENGTH SIPHASH_KEY_LENGTH 3211 3212 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]); 3213 VNET_DEFINE_STATIC(int, isn_last); 3214 VNET_DEFINE_STATIC(int, isn_last_reseed); 3215 VNET_DEFINE_STATIC(u_int32_t, isn_offset); 3216 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old); 3217 3218 #define V_isn_secret VNET(isn_secret) 3219 #define V_isn_last VNET(isn_last) 3220 #define V_isn_last_reseed VNET(isn_last_reseed) 3221 #define V_isn_offset VNET(isn_offset) 3222 #define V_isn_offset_old VNET(isn_offset_old) 3223 3224 tcp_seq 3225 tcp_new_isn(struct in_conninfo *inc) 3226 { 3227 tcp_seq new_isn; 3228 u_int32_t projected_offset; 3229 3230 ISN_LOCK(); 3231 /* Seed if this is the first use, reseed if requested. */ 3232 if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) && 3233 (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz) 3234 < (u_int)ticks))) { 3235 arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0); 3236 V_isn_last_reseed = ticks; 3237 } 3238 3239 /* Compute the hash and return the ISN. */ 3240 new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret, 3241 sizeof(V_isn_secret)); 3242 V_isn_offset += ISN_STATIC_INCREMENT + 3243 (arc4random() & ISN_RANDOM_INCREMENT); 3244 if (ticks != V_isn_last) { 3245 projected_offset = V_isn_offset_old + 3246 ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last); 3247 if (SEQ_GT(projected_offset, V_isn_offset)) 3248 V_isn_offset = projected_offset; 3249 V_isn_offset_old = V_isn_offset; 3250 V_isn_last = ticks; 3251 } 3252 new_isn += V_isn_offset; 3253 ISN_UNLOCK(); 3254 return (new_isn); 3255 } 3256 3257 /* 3258 * When a specific ICMP unreachable message is received and the 3259 * connection state is SYN-SENT, drop the connection. This behavior 3260 * is controlled by the icmp_may_rst sysctl. 
3261 */ 3262 static struct inpcb * 3263 tcp_drop_syn_sent(struct inpcb *inp, int errno) 3264 { 3265 struct tcpcb *tp; 3266 3267 NET_EPOCH_ASSERT(); 3268 INP_WLOCK_ASSERT(inp); 3269 3270 tp = intotcpcb(inp); 3271 if (tp->t_state != TCPS_SYN_SENT) 3272 return (inp); 3273 3274 if (IS_FASTOPEN(tp->t_flags)) 3275 tcp_fastopen_disable_path(tp); 3276 3277 tp = tcp_drop(tp, errno); 3278 if (tp != NULL) 3279 return (inp); 3280 else 3281 return (NULL); 3282 } 3283 3284 /* 3285 * When `need fragmentation' ICMP is received, update our idea of the MSS 3286 * based on the new value. Also nudge TCP to send something, since we 3287 * know the packet we just sent was dropped. 3288 * This duplicates some code in the tcp_mss() function in tcp_input.c. 3289 */ 3290 static struct inpcb * 3291 tcp_mtudisc_notify(struct inpcb *inp, int error) 3292 { 3293 3294 return (tcp_mtudisc(inp, -1)); 3295 } 3296 3297 static struct inpcb * 3298 tcp_mtudisc(struct inpcb *inp, int mtuoffer) 3299 { 3300 struct tcpcb *tp; 3301 struct socket *so; 3302 3303 INP_WLOCK_ASSERT(inp); 3304 3305 tp = intotcpcb(inp); 3306 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL")); 3307 3308 tcp_mss_update(tp, -1, mtuoffer, NULL, NULL); 3309 3310 so = inp->inp_socket; 3311 SOCKBUF_LOCK(&so->so_snd); 3312 /* If the mss is larger than the socket buffer, decrease the mss. */ 3313 if (so->so_snd.sb_hiwat < tp->t_maxseg) 3314 tp->t_maxseg = so->so_snd.sb_hiwat; 3315 SOCKBUF_UNLOCK(&so->so_snd); 3316 3317 TCPSTAT_INC(tcps_mturesent); 3318 tp->t_rtttime = 0; 3319 tp->snd_nxt = tp->snd_una; 3320 tcp_free_sackholes(tp); 3321 tp->snd_recover = tp->snd_max; 3322 if (tp->t_flags & TF_SACK_PERMIT) 3323 EXIT_FASTRECOVERY(tp->t_flags); 3324 if (tp->t_fb->tfb_tcp_mtu_chg != NULL) { 3325 /* 3326 * Conceptually the snd_nxt setting 3327 * and freeing sack holes should 3328 * be done by the default stacks 3329 * own tfb_tcp_mtu_chg(). 3330 */ 3331 tp->t_fb->tfb_tcp_mtu_chg(tp); 3332 } 3333 if (tcp_output(tp) < 0) 3334 return (NULL); 3335 else 3336 return (inp); 3337 } 3338 3339 #ifdef INET 3340 /* 3341 * Look-up the routing entry to the peer of this inpcb. If no route 3342 * is found and it cannot be allocated, then return 0. This routine 3343 * is called by TCP routines that access the rmx structure and by 3344 * tcp_mss_update to get the peer/interface MTU. 3345 */ 3346 uint32_t 3347 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap) 3348 { 3349 struct nhop_object *nh; 3350 struct ifnet *ifp; 3351 uint32_t maxmtu = 0; 3352 3353 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer")); 3354 3355 if (inc->inc_faddr.s_addr != INADDR_ANY) { 3356 nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0); 3357 if (nh == NULL) 3358 return (0); 3359 3360 ifp = nh->nh_ifp; 3361 maxmtu = nh->nh_mtu; 3362 3363 /* Report additional interface capabilities. 
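 * Currently that means TSO: if the interface advertises
 * IFCAP_TSO4 together with CSUM_TSO hardware assistance, its TSO
 * limits are passed up so the output path can hand it
 * larger-than-MTU chains.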
*/ 3364 if (cap != NULL) { 3365 if (ifp->if_capenable & IFCAP_TSO4 && 3366 ifp->if_hwassist & CSUM_TSO) { 3367 cap->ifcap |= CSUM_TSO; 3368 cap->tsomax = ifp->if_hw_tsomax; 3369 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount; 3370 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize; 3371 } 3372 } 3373 } 3374 return (maxmtu); 3375 } 3376 #endif /* INET */ 3377 3378 #ifdef INET6 3379 uint32_t 3380 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap) 3381 { 3382 struct nhop_object *nh; 3383 struct in6_addr dst6; 3384 uint32_t scopeid; 3385 struct ifnet *ifp; 3386 uint32_t maxmtu = 0; 3387 3388 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer")); 3389 3390 if (inc->inc_flags & INC_IPV6MINMTU) 3391 return (IPV6_MMTU); 3392 3393 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) { 3394 in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid); 3395 nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0); 3396 if (nh == NULL) 3397 return (0); 3398 3399 ifp = nh->nh_ifp; 3400 maxmtu = nh->nh_mtu; 3401 3402 /* Report additional interface capabilities. */ 3403 if (cap != NULL) { 3404 if (ifp->if_capenable & IFCAP_TSO6 && 3405 ifp->if_hwassist & CSUM_TSO) { 3406 cap->ifcap |= CSUM_TSO; 3407 cap->tsomax = ifp->if_hw_tsomax; 3408 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount; 3409 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize; 3410 } 3411 } 3412 } 3413 3414 return (maxmtu); 3415 } 3416 3417 /* 3418 * Handle setsockopt(IPV6_USE_MIN_MTU) by a TCP stack. 3419 * 3420 * XXXGL: we are updating inpcb here with INC_IPV6MINMTU flag. 3421 * The right place to do that is ip6_setpktopt() that has just been 3422 * executed. By the way it just filled ip6po_minmtu for us. 3423 */ 3424 void 3425 tcp6_use_min_mtu(struct tcpcb *tp) 3426 { 3427 struct inpcb *inp = tptoinpcb(tp); 3428 3429 INP_WLOCK_ASSERT(inp); 3430 /* 3431 * In case of the IPV6_USE_MIN_MTU socket 3432 * option, the INC_IPV6MINMTU flag to announce 3433 * a corresponding MSS during the initial 3434 * handshake. If the TCP connection is not in 3435 * the front states, just reduce the MSS being 3436 * used. This avoids the sending of TCP 3437 * segments which will be fragmented at the 3438 * IPv6 layer. 3439 */ 3440 inp->inp_inc.inc_flags |= INC_IPV6MINMTU; 3441 if ((tp->t_state >= TCPS_SYN_SENT) && 3442 (inp->inp_inc.inc_flags & INC_ISIPV6)) { 3443 struct ip6_pktopts *opt; 3444 3445 opt = inp->in6p_outputopts; 3446 if (opt != NULL && opt->ip6po_minmtu == IP6PO_MINMTU_ALL && 3447 tp->t_maxseg > TCP6_MSS) 3448 tp->t_maxseg = TCP6_MSS; 3449 } 3450 } 3451 #endif /* INET6 */ 3452 3453 /* 3454 * Calculate effective SMSS per RFC5681 definition for a given TCP 3455 * connection at its current state, taking into account SACK and etc. 3456 */ 3457 u_int 3458 tcp_maxseg(const struct tcpcb *tp) 3459 { 3460 u_int optlen; 3461 3462 if (tp->t_flags & TF_NOOPT) 3463 return (tp->t_maxseg); 3464 3465 /* 3466 * Here we have a simplified code from tcp_addoptions(), 3467 * without a proper loop, and having most of paddings hardcoded. 3468 * We might make mistakes with padding here in some edge cases, 3469 * but this is harmless, since result of tcp_maxseg() is used 3470 * only in cwnd and ssthresh estimations. 
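 * For example, on an established connection with only timestamps
 * in use the result is tp->t_maxseg - TCPOLEN_TSTAMP_APPA, i.e.
 * 12 bytes less.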
3471 */ 3472 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 3473 if (tp->t_flags & TF_RCVD_TSTMP) 3474 optlen = TCPOLEN_TSTAMP_APPA; 3475 else 3476 optlen = 0; 3477 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3478 if (tp->t_flags & TF_SIGNATURE) 3479 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE); 3480 #endif 3481 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) { 3482 optlen += TCPOLEN_SACKHDR; 3483 optlen += tp->rcv_numsacks * TCPOLEN_SACK; 3484 optlen = PADTCPOLEN(optlen); 3485 } 3486 } else { 3487 if (tp->t_flags & TF_REQ_TSTMP) 3488 optlen = TCPOLEN_TSTAMP_APPA; 3489 else 3490 optlen = PADTCPOLEN(TCPOLEN_MAXSEG); 3491 if (tp->t_flags & TF_REQ_SCALE) 3492 optlen += PADTCPOLEN(TCPOLEN_WINDOW); 3493 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3494 if (tp->t_flags & TF_SIGNATURE) 3495 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE); 3496 #endif 3497 if (tp->t_flags & TF_SACK_PERMIT) 3498 optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED); 3499 } 3501 optlen = min(optlen, TCP_MAXOLEN); 3502 return (tp->t_maxseg - optlen); 3503 } 3504 3505 3506 u_int 3507 tcp_fixed_maxseg(const struct tcpcb *tp) 3508 { 3509 int optlen; 3510 3511 if (tp->t_flags & TF_NOOPT) 3512 return (tp->t_maxseg); 3513 3514 /* 3515 * Here we have simplified code from tcp_addoptions(), without 3516 * a proper loop, and with most of the paddings hardcoded. 3517 * We only consider fixed options that we would send every 3518 * time, i.e. SACK is not considered. This is important 3519 * for cc modules to figure out what the modulo of the 3520 * cwnd should be. 3521 */ 3522 #define PAD(len) ((((len) / 4) + !!((len) % 4)) * 4) 3523 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 3524 if (tp->t_flags & TF_RCVD_TSTMP) 3525 optlen = TCPOLEN_TSTAMP_APPA; 3526 else 3527 optlen = 0; 3528 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3529 if (tp->t_flags & TF_SIGNATURE) 3530 optlen += PAD(TCPOLEN_SIGNATURE); 3531 #endif 3532 } else { 3533 if (tp->t_flags & TF_REQ_TSTMP) 3534 optlen = TCPOLEN_TSTAMP_APPA; 3535 else 3536 optlen = PAD(TCPOLEN_MAXSEG); 3537 if (tp->t_flags & TF_REQ_SCALE) 3538 optlen += PAD(TCPOLEN_WINDOW); 3539 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3540 if (tp->t_flags & TF_SIGNATURE) 3541 optlen += PAD(TCPOLEN_SIGNATURE); 3542 #endif 3543 if (tp->t_flags & TF_SACK_PERMIT) 3544 optlen += PAD(TCPOLEN_SACK_PERMITTED); 3545 } 3546 #undef PAD 3547 optlen = min(optlen, TCP_MAXOLEN); 3548 return (tp->t_maxseg - optlen); 3549 } 3550 3551 3552 3553 static int 3554 sysctl_drop(SYSCTL_HANDLER_ARGS) 3555 { 3556 /* addrs[0] is a foreign socket, addrs[1] is a local one.
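 * This is the raw interface behind tcpdrop(8): userland writes
 * the two sockaddrs into the net.inet.tcp.drop sysctl and the
 * matching connection, if any, is aborted.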
*/ 3557 struct sockaddr_storage addrs[2]; 3558 struct inpcb *inp; 3559 struct tcpcb *tp; 3560 #ifdef INET 3561 struct sockaddr_in *fin = NULL, *lin = NULL; 3562 #endif 3563 struct epoch_tracker et; 3564 #ifdef INET6 3565 struct sockaddr_in6 *fin6, *lin6; 3566 #endif 3567 int error; 3568 3569 inp = NULL; 3570 #ifdef INET6 3571 fin6 = lin6 = NULL; 3572 #endif 3573 error = 0; 3574 3575 if (req->oldptr != NULL || req->oldlen != 0) 3576 return (EINVAL); 3577 if (req->newptr == NULL) 3578 return (EPERM); 3579 if (req->newlen < sizeof(addrs)) 3580 return (ENOMEM); 3581 error = SYSCTL_IN(req, &addrs, sizeof(addrs)); 3582 if (error) 3583 return (error); 3584 3585 switch (addrs[0].ss_family) { 3586 #ifdef INET6 3587 case AF_INET6: 3588 fin6 = (struct sockaddr_in6 *)&addrs[0]; 3589 lin6 = (struct sockaddr_in6 *)&addrs[1]; 3590 if (fin6->sin6_len != sizeof(struct sockaddr_in6) || 3591 lin6->sin6_len != sizeof(struct sockaddr_in6)) 3592 return (EINVAL); 3593 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) { 3594 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) 3595 return (EINVAL); 3596 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]); 3597 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]); 3598 #ifdef INET 3599 fin = (struct sockaddr_in *)&addrs[0]; 3600 lin = (struct sockaddr_in *)&addrs[1]; 3601 #endif 3602 break; 3603 } 3604 error = sa6_embedscope(fin6, V_ip6_use_defzone); 3605 if (error) 3606 return (error); 3607 error = sa6_embedscope(lin6, V_ip6_use_defzone); 3608 if (error) 3609 return (error); 3610 break; 3611 #endif 3612 #ifdef INET 3613 case AF_INET: 3614 fin = (struct sockaddr_in *)&addrs[0]; 3615 lin = (struct sockaddr_in *)&addrs[1]; 3616 if (fin->sin_len != sizeof(struct sockaddr_in) || 3617 lin->sin_len != sizeof(struct sockaddr_in)) 3618 return (EINVAL); 3619 break; 3620 #endif 3621 default: 3622 return (EINVAL); 3623 } 3624 NET_EPOCH_ENTER(et); 3625 switch (addrs[0].ss_family) { 3626 #ifdef INET6 3627 case AF_INET6: 3628 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr, 3629 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 3630 INPLOOKUP_WLOCKPCB, NULL); 3631 break; 3632 #endif 3633 #ifdef INET 3634 case AF_INET: 3635 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port, 3636 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL); 3637 break; 3638 #endif 3639 } 3640 if (inp != NULL) { 3641 if (!SOLISTENING(inp->inp_socket)) { 3642 tp = intotcpcb(inp); 3643 tp = tcp_drop(tp, ECONNABORTED); 3644 if (tp != NULL) 3645 INP_WUNLOCK(inp); 3646 } else 3647 INP_WUNLOCK(inp); 3648 } else 3649 error = ESRCH; 3650 NET_EPOCH_EXIT(et); 3651 return (error); 3652 } 3653 3654 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop, 3655 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3656 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "", 3657 "Drop TCP connection"); 3658 3659 static int 3660 tcp_sysctl_setsockopt(SYSCTL_HANDLER_ARGS) 3661 { 3662 return (sysctl_setsockopt(oidp, arg1, arg2, req, &V_tcbinfo, 3663 &tcp_ctloutput_set)); 3664 } 3665 3666 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, setsockopt, 3667 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3668 CTLFLAG_MPSAFE, NULL, 0, tcp_sysctl_setsockopt, "", 3669 "Set socket option for TCP endpoint"); 3670 3671 #ifdef KERN_TLS 3672 static int 3673 sysctl_switch_tls(SYSCTL_HANDLER_ARGS) 3674 { 3675 /* addrs[0] is a foreign socket, addrs[1] is a local one. 
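 * Same address convention as sysctl_drop() above; here the
 * looked-up connection has its transmit TLS mode switched
 * between software and ifnet (NIC) offload via
 * ktls_set_tx_mode().
 */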
*/ 3676 struct sockaddr_storage addrs[2]; 3677 struct inpcb *inp; 3678 #ifdef INET 3679 struct sockaddr_in *fin = NULL, *lin = NULL; 3680 #endif 3681 struct epoch_tracker et; 3682 #ifdef INET6 3683 struct sockaddr_in6 *fin6, *lin6; 3684 #endif 3685 int error; 3686 3687 inp = NULL; 3688 #ifdef INET6 3689 fin6 = lin6 = NULL; 3690 #endif 3691 error = 0; 3692 3693 if (req->oldptr != NULL || req->oldlen != 0) 3694 return (EINVAL); 3695 if (req->newptr == NULL) 3696 return (EPERM); 3697 if (req->newlen < sizeof(addrs)) 3698 return (ENOMEM); 3699 error = SYSCTL_IN(req, &addrs, sizeof(addrs)); 3700 if (error) 3701 return (error); 3702 3703 switch (addrs[0].ss_family) { 3704 #ifdef INET6 3705 case AF_INET6: 3706 fin6 = (struct sockaddr_in6 *)&addrs[0]; 3707 lin6 = (struct sockaddr_in6 *)&addrs[1]; 3708 if (fin6->sin6_len != sizeof(struct sockaddr_in6) || 3709 lin6->sin6_len != sizeof(struct sockaddr_in6)) 3710 return (EINVAL); 3711 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) { 3712 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) 3713 return (EINVAL); 3714 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]); 3715 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]); 3716 #ifdef INET 3717 fin = (struct sockaddr_in *)&addrs[0]; 3718 lin = (struct sockaddr_in *)&addrs[1]; 3719 #endif 3720 break; 3721 } 3722 error = sa6_embedscope(fin6, V_ip6_use_defzone); 3723 if (error) 3724 return (error); 3725 error = sa6_embedscope(lin6, V_ip6_use_defzone); 3726 if (error) 3727 return (error); 3728 break; 3729 #endif 3730 #ifdef INET 3731 case AF_INET: 3732 fin = (struct sockaddr_in *)&addrs[0]; 3733 lin = (struct sockaddr_in *)&addrs[1]; 3734 if (fin->sin_len != sizeof(struct sockaddr_in) || 3735 lin->sin_len != sizeof(struct sockaddr_in)) 3736 return (EINVAL); 3737 break; 3738 #endif 3739 default: 3740 return (EINVAL); 3741 } 3742 NET_EPOCH_ENTER(et); 3743 switch (addrs[0].ss_family) { 3744 #ifdef INET6 3745 case AF_INET6: 3746 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr, 3747 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 3748 INPLOOKUP_WLOCKPCB, NULL); 3749 break; 3750 #endif 3751 #ifdef INET 3752 case AF_INET: 3753 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port, 3754 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL); 3755 break; 3756 #endif 3757 } 3758 NET_EPOCH_EXIT(et); 3759 if (inp != NULL) { 3760 struct socket *so; 3761 3762 so = inp->inp_socket; 3763 soref(so); 3764 error = ktls_set_tx_mode(so, 3765 arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET); 3766 INP_WUNLOCK(inp); 3767 sorele(so); 3768 } else 3769 error = ESRCH; 3770 return (error); 3771 } 3772 3773 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls, 3774 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3775 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "", 3776 "Switch TCP connection to SW TLS"); 3777 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls, 3778 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3779 CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "", 3780 "Switch TCP connection to ifnet TLS"); 3781 #endif 3782 3783 /* 3784 * Generate a standardized TCP log line for use throughout the 3785 * tcp subsystem. Memory allocation is done with M_NOWAIT to 3786 * allow use in the interrupt context. 3787 * 3788 * NB: The caller MUST free(s, M_TCPLOG) the returned string. 3789 * NB: The function may return NULL if memory allocation failed. 3790 * 3791 * Due to header inclusion and ordering limitations the struct ip 3792 * and ip6_hdr pointers have to be passed as void pointers. 
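 * A typical call site looks like (a sketch):
 *
 *	if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
 *		log(LOG_DEBUG, "%s; %s: dropped\n", s, __func__);
 *		free(s, M_TCPLOG);
 *	}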
3793 */ 3794 char * 3795 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr, 3796 const void *ip6hdr) 3797 { 3798 3799 /* Is logging enabled? */ 3800 if (V_tcp_log_in_vain == 0) 3801 return (NULL); 3802 3803 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr)); 3804 } 3805 3806 char * 3807 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr, 3808 const void *ip6hdr) 3809 { 3810 3811 /* Is logging enabled? */ 3812 if (tcp_log_debug == 0) 3813 return (NULL); 3814 3815 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr)); 3816 } 3817 3818 static char * 3819 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr, 3820 const void *ip6hdr) 3821 { 3822 char *s, *sp; 3823 size_t size; 3824 #ifdef INET 3825 const struct ip *ip = (const struct ip *)ip4hdr; 3826 #endif 3827 #ifdef INET6 3828 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)ip6hdr; 3829 #endif /* INET6 */ 3830 3831 /* 3832 * The log line looks like this: 3833 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>" 3834 */ 3835 size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") + 3836 sizeof(PRINT_TH_FLAGS) + 1 + 3837 #ifdef INET6 3838 2 * INET6_ADDRSTRLEN; 3839 #else 3840 2 * INET_ADDRSTRLEN; 3841 #endif /* INET6 */ 3842 3843 s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT); 3844 if (s == NULL) 3845 return (NULL); 3846 3847 strcat(s, "TCP: ["); 3848 sp = s + strlen(s); 3849 3850 if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) { 3851 inet_ntoa_r(inc->inc_faddr, sp); 3852 sp = s + strlen(s); 3853 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport)); 3854 sp = s + strlen(s); 3855 inet_ntoa_r(inc->inc_laddr, sp); 3856 sp = s + strlen(s); 3857 sprintf(sp, "]:%i", ntohs(inc->inc_lport)); 3858 #ifdef INET6 3859 } else if (inc) { 3860 ip6_sprintf(sp, &inc->inc6_faddr); 3861 sp = s + strlen(s); 3862 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport)); 3863 sp = s + strlen(s); 3864 ip6_sprintf(sp, &inc->inc6_laddr); 3865 sp = s + strlen(s); 3866 sprintf(sp, "]:%i", ntohs(inc->inc_lport)); 3867 } else if (ip6 && th) { 3868 ip6_sprintf(sp, &ip6->ip6_src); 3869 sp = s + strlen(s); 3870 sprintf(sp, "]:%i to [", ntohs(th->th_sport)); 3871 sp = s + strlen(s); 3872 ip6_sprintf(sp, &ip6->ip6_dst); 3873 sp = s + strlen(s); 3874 sprintf(sp, "]:%i", ntohs(th->th_dport)); 3875 #endif /* INET6 */ 3876 #ifdef INET 3877 } else if (ip && th) { 3878 inet_ntoa_r(ip->ip_src, sp); 3879 sp = s + strlen(s); 3880 sprintf(sp, "]:%i to [", ntohs(th->th_sport)); 3881 sp = s + strlen(s); 3882 inet_ntoa_r(ip->ip_dst, sp); 3883 sp = s + strlen(s); 3884 sprintf(sp, "]:%i", ntohs(th->th_dport)); 3885 #endif /* INET */ 3886 } else { 3887 free(s, M_TCPLOG); 3888 return (NULL); 3889 } 3890 sp = s + strlen(s); 3891 if (th) 3892 sprintf(sp, " tcpflags 0x%b", tcp_get_flags(th), PRINT_TH_FLAGS); 3893 if (*(s + size - 1) != '\0') 3894 panic("%s: string too long", __func__); 3895 return (s); 3896 } 3897 3898 /* 3899 * A subroutine which makes it easy to track TCP state changes with DTrace. 3900 * This function shouldn't be called for t_state initializations that don't 3901 * correspond to actual TCP state transitions. 
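 * The transition fires the tcp:::state-change DTrace probe and
 * can be watched from userland with, e.g.:
 *
 *	dtrace -n 'tcp:::state-change'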
/*
 * A subroutine which makes it easy to track TCP state changes with DTrace.
 * This function shouldn't be called for t_state initializations that don't
 * correspond to actual TCP state transitions.
 */
void
tcp_state_change(struct tcpcb *tp, int newstate)
{
#if defined(KDTRACE_HOOKS)
	int pstate = tp->t_state;
#endif

	TCPSTATES_DEC(tp->t_state);
	TCPSTATES_INC(newstate);
	tp->t_state = newstate;
	TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
}

/*
 * Create an external-format (``xtcpcb'') structure using the information in
 * the kernel-format tcpcb structure pointed to by tp.  This is done to
 * reduce the spew of irrelevant information over this interface, to isolate
 * user code from changes in the kernel structure, and potentially to provide
 * information-hiding if we decide that some of this information should be
 * hidden from users.
 */
void
tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
{
	struct tcpcb *tp = intotcpcb(inp);
	sbintime_t now;

	bzero(xt, sizeof(*xt));
	xt->t_state = tp->t_state;
	xt->t_logstate = tcp_get_bblog_state(tp);
	xt->t_flags = tp->t_flags;
	xt->t_sndzerowin = tp->t_sndzerowin;
	xt->t_sndrexmitpack = tp->t_sndrexmitpack;
	xt->t_rcvoopack = tp->t_rcvoopack;
	xt->t_rcv_wnd = tp->rcv_wnd;
	xt->t_snd_wnd = tp->snd_wnd;
	xt->t_snd_cwnd = tp->snd_cwnd;
	xt->t_snd_ssthresh = tp->snd_ssthresh;
	xt->t_dsack_bytes = tp->t_dsack_bytes;
	xt->t_dsack_tlp_bytes = tp->t_dsack_tlp_bytes;
	xt->t_dsack_pack = tp->t_dsack_pack;
	xt->t_maxseg = tp->t_maxseg;
	/*
	 * Both conditional expressions must be parenthesized: without the
	 * parentheses "+" binds tighter than "?:" and the ACE bit would
	 * never be reported independently of the ECN bit.
	 */
	xt->xt_ecn = ((tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0) +
	    ((tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0);

	now = getsbinuptime();
#define	COPYTIMER(which,where) do {					\
	if (tp->t_timers[which] != SBT_MAX)				\
		xt->where = (tp->t_timers[which] - now) / SBT_1MS;	\
	else								\
		xt->where = 0;						\
} while (0)
	COPYTIMER(TT_DELACK, tt_delack);
	COPYTIMER(TT_REXMT, tt_rexmt);
	COPYTIMER(TT_PERSIST, tt_persist);
	COPYTIMER(TT_KEEP, tt_keep);
	COPYTIMER(TT_2MSL, tt_2msl);
#undef COPYTIMER
	xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;

	xt->xt_encaps_port = tp->t_port;
	bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
	    TCP_FUNCTION_NAME_LEN_MAX);
	bcopy(CC_ALGO(tp)->name, xt->xt_cc, TCP_CA_NAME_MAX);
#ifdef TCP_BLACKBOX
	(void)tcp_log_get_id(tp, xt->xt_logid);
#endif

	xt->xt_len = sizeof(struct xtcpcb);
	in_pcbtoxinpcb(inp, &xt->xt_inp);
	/*
	 * TCP doesn't use the inp_ppcb pointer; we embed the inpcb into
	 * the tcpcb.  Fix up the pointer that in_pcbtoxinpcb() has set.
	 * netstat(1) used to use this pointer when printing TCP
	 * connections, so the fixup needs to stay for stable/14.
	 */
	xt->xt_inp.inp_ppcb = (uintptr_t)tp;
}
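/*
 * Example (illustrative, hypothetical consumer code): a monitoring tool
 * reading xtcpcb entries could decode the xt_ecn encoding produced above
 * as follows.
 *
 *	switch (xt->xt_ecn) {
 *	case 0: printf("no ECN negotiated\n");          break;
 *	case 1: printf("classic ECN permitted\n");      break;
 *	case 2: printf("ACE (accurate ECN) permitted\n"); break;
 *	case 3: printf("both ECN and ACE permitted\n"); break;
 *	}
 */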
void
tcp_log_end_status(struct tcpcb *tp, uint8_t status)
{
	uint32_t bit, i;

	if ((tp == NULL) ||
	    (status > TCP_EI_STATUS_MAX_VALUE) ||
	    (status == 0)) {
		/* Invalid */
		return;
	}
	if (status > (sizeof(uint32_t) * 8)) {
		/* Should this be a KASSERT? */
		return;
	}
	bit = 1U << (status - 1);
	if (bit & tp->t_end_info_status) {
		/* already logged */
		return;
	}
	for (i = 0; i < TCP_END_BYTE_INFO; i++) {
		if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) {
			tp->t_end_info_bytes[i] = status;
			tp->t_end_info_status |= bit;
			break;
		}
	}
}

int
tcp_can_enable_pacing(void)
{

	if ((tcp_pacing_limit == -1) ||
	    (tcp_pacing_limit > number_of_tcp_connections_pacing)) {
		atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1);
		shadow_num_connections = number_of_tcp_connections_pacing;
		return (1);
	} else {
		counter_u64_add(tcp_pacing_failures, 1);
		return (0);
	}
}

static uint8_t tcp_pacing_warning = 0;

void
tcp_decrement_paced_conn(void)
{
	uint32_t ret;

	ret = atomic_fetchadd_int(&number_of_tcp_connections_pacing, -1);
	shadow_num_connections = number_of_tcp_connections_pacing;
	KASSERT(ret != 0, ("tcp_decrement_paced_conn: decrement would wrap"));
	if (ret == 0) {
		if (tcp_pacing_limit != -1) {
			printf("Warning: all pacing is now disabled, the paced connection count was decremented below zero!\n");
			tcp_pacing_limit = 0;
		} else if (tcp_pacing_warning == 0) {
			printf("Warning: the paced connection count is invalid due to an extra decrement\n");
			tcp_pacing_warning = 1;
		}
	}
}

static void
tcp_default_switch_failed(struct tcpcb *tp)
{
	/*
	 * If a switch fails we only need to
	 * care about two things:
	 * a) The t_flags2
	 * and
	 * b) The timer granularity.
	 * Timeouts, at least for now, don't use the
	 * old callout system in the other stacks so
	 * those are hopefully safe.
	 */
	tcp_lro_features_off(tp);
	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
}
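/*
 * Example (illustrative, hypothetical stack code): the pacing-count
 * helpers above must be used as a strict pair.  A stack acquires a slot
 * before it starts pacing and releases it exactly once when pacing stops
 * or the connection closes; otherwise the global count drifts and the
 * warnings above fire.
 *
 *	if (tcp_can_enable_pacing())
 *		sc->sc_pacing = 1;	(hypothetical per-stack flag)
 *	...
 *	if (sc->sc_pacing) {
 *		tcp_decrement_paced_conn();
 *		sc->sc_pacing = 0;
 *	}
 */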
#ifdef TCP_ACCOUNTING
int
tcp_do_ack_accounting(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to, uint32_t tiwin, int mss)
{
	if (SEQ_LT(th->th_ack, tp->snd_una)) {
		/* Do we have a SACK? */
		if (to->to_flags & TOF_SACK) {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[ACK_SACK]++;
			}
			return (ACK_SACK);
		} else {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[ACK_BEHIND]++;
			}
			return (ACK_BEHIND);
		}
	} else if (th->th_ack == tp->snd_una) {
		/* Do we have a SACK? */
		if (to->to_flags & TOF_SACK) {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[ACK_SACK]++;
			}
			return (ACK_SACK);
		} else if (tiwin != tp->snd_wnd) {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[ACK_RWND]++;
			}
			return (ACK_RWND);
		} else {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[ACK_DUPACK]++;
			}
			return (ACK_DUPACK);
		}
	} else {
		if (!SEQ_GT(th->th_ack, tp->snd_max)) {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[CNT_OF_ACKS_IN] +=
				    (((th->th_ack - tp->snd_una) + mss - 1) / mss);
			}
		}
		if (to->to_flags & TOF_SACK) {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[ACK_CUMACK_SACK]++;
			}
			return (ACK_CUMACK_SACK);
		} else {
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[ACK_CUMACK]++;
			}
			return (ACK_CUMACK);
		}
	}
}
#endif
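/*
 * Worked example (illustrative) of the classification above, assuming
 * snd_una = 1000 and snd_max = 5000:
 *
 *	th_ack =  500                     -> ACK_BEHIND (ACK_SACK if SACK)
 *	th_ack = 1000, window changed     -> ACK_RWND
 *	th_ack = 1000, window unchanged   -> ACK_DUPACK
 *	th_ack = 3000                     -> ACK_CUMACK (ACK_CUMACK_SACK
 *	                                     if SACK); CNT_OF_ACKS_IN is
 *	                                     also credited with the number
 *	                                     of mss-sized segments acked.
 */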
void
tcp_change_time_units(struct tcpcb *tp, int granularity)
{
	if (tp->t_tmr_granularity == granularity) {
		/* We are there */
		return;
	}
	if (granularity == TCP_TMR_GRANULARITY_USEC) {
		KASSERT((tp->t_tmr_granularity == TCP_TMR_GRANULARITY_TICKS),
		    ("Granularity is not TICKS, it is %u in tp:%p",
		    tp->t_tmr_granularity, tp));
		tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
		if (tp->t_srtt > 1) {
			uint32_t val, frac;

			val = tp->t_srtt >> TCP_RTT_SHIFT;
			frac = tp->t_srtt & 0x1f;
			tp->t_srtt = TICKS_2_USEC(val);
			/*
			 * frac is the fractional part of the srtt (if
			 * any), in ticks: each bit represents 1/32nd
			 * of a tick.
			 */
			if (frac) {
				if (hz == 1000) {
					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) /
					    (uint64_t)TCP_RTT_SCALE);
				} else {
					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) /
					    ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
				}
				tp->t_srtt += frac;
			}
		}
		if (tp->t_rttvar) {
			uint32_t val, frac;

			val = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
			frac = tp->t_rttvar & 0x1f;
			tp->t_rttvar = TICKS_2_USEC(val);
			/*
			 * frac is the fractional part of the rttvar (if
			 * any), in ticks: each bit represents 1/32nd
			 * of a tick.
			 */
			if (frac) {
				if (hz == 1000) {
					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) /
					    (uint64_t)TCP_RTT_SCALE);
				} else {
					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) /
					    ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
				}
				tp->t_rttvar += frac;
			}
		}
		tp->t_tmr_granularity = TCP_TMR_GRANULARITY_USEC;
	} else if (granularity == TCP_TMR_GRANULARITY_TICKS) {
		/* Convert back to ticks. */
		KASSERT((tp->t_tmr_granularity == TCP_TMR_GRANULARITY_USEC),
		    ("Granularity is not USEC, it is %u in tp:%p",
		    tp->t_tmr_granularity, tp));
		if (tp->t_srtt > 1) {
			uint32_t val, frac;

			val = USEC_2_TICKS(tp->t_srtt);
			frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
			tp->t_srtt = val << TCP_RTT_SHIFT;
			/*
			 * frac is the fractional part left over from
			 * converting to ticks and shifting; convert it
			 * to the 5-bit remainder.
			 */
			if (frac) {
				if (hz == 1000) {
					frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) /
					    (uint64_t)HPTS_USEC_IN_MSEC);
				} else {
					frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /
					    (uint64_t)HPTS_USEC_IN_SEC);
				}
				tp->t_srtt += frac;
			}
		}
		if (tp->t_rttvar) {
			uint32_t val, frac;

			val = USEC_2_TICKS(tp->t_rttvar);
			/* The remainder must come from t_rttvar, not t_srtt. */
			frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
			tp->t_rttvar = val << TCP_RTTVAR_SHIFT;
			/*
			 * frac is the fractional part left over from
			 * converting to ticks and shifting; convert it
			 * to the 5-bit remainder.
			 */
			if (frac) {
				if (hz == 1000) {
					frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) /
					    (uint64_t)HPTS_USEC_IN_MSEC);
				} else {
					frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /
					    (uint64_t)HPTS_USEC_IN_SEC);
				}
				tp->t_rttvar += frac;
			}
		}
		tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow);
		tp->t_tmr_granularity = TCP_TMR_GRANULARITY_TICKS;
	}
#ifdef INVARIANTS
	else {
		panic("Unknown granularity:%d tp:%p",
		    granularity, tp);
	}
#endif
}
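/*
 * Worked example (illustrative) of the ticks-to-usec conversion above
 * with hz = 1000: a t_srtt of 0x65 stored in ticks encodes
 * val = 0x65 >> TCP_RTT_SHIFT = 3 ticks plus frac = 0x65 & 0x1f = 5,
 * i.e. 5/32 of a tick.  TICKS_2_USEC(3) = 3000 usec, and frac becomes
 * 5 * 1000 / 32 = 156 usec, so t_srtt ends up as 3156 usec.
 */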
4267 */ 4268 STAILQ_FOREACH_SAFE(m, &tp->t_inqueue, m_stailqpkt, save) { 4269 if (m->m_flags & M_ACKCMP) { 4270 if (m == STAILQ_FIRST(&tp->t_inqueue)) 4271 STAILQ_REMOVE_HEAD(&tp->t_inqueue, 4272 m_stailqpkt); 4273 else 4274 STAILQ_REMOVE_AFTER(&tp->t_inqueue, 4275 prev, m_stailqpkt); 4276 m_freem(m); 4277 } else 4278 prev = m; 4279 } 4280 } 4281 } 4282 4283 #ifdef TCP_REQUEST_TRK 4284 uint32_t 4285 tcp_estimate_tls_overhead(struct socket *so, uint64_t tls_usr_bytes) 4286 { 4287 #ifdef KERN_TLS 4288 struct ktls_session *tls; 4289 uint32_t rec_oh, records; 4290 4291 tls = so->so_snd.sb_tls_info; 4292 if (tls == NULL) 4293 return (0); 4294 4295 rec_oh = tls->params.tls_hlen + tls->params.tls_tlen; 4296 records = ((tls_usr_bytes + tls->params.max_frame_len - 1)/tls->params.max_frame_len); 4297 return (records * rec_oh); 4298 #else 4299 return (0); 4300 #endif 4301 } 4302 4303 extern uint32_t tcp_stale_entry_time; 4304 uint32_t tcp_stale_entry_time = 250000; 4305 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, usrlog_stale, CTLFLAG_RW, 4306 &tcp_stale_entry_time, 250000, "Time that a tcpreq entry without a sendfile ages out"); 4307 4308 void 4309 tcp_req_log_req_info(struct tcpcb *tp, struct tcp_sendfile_track *req, 4310 uint16_t slot, uint8_t val, uint64_t offset, uint64_t nbytes) 4311 { 4312 if (tcp_bblogging_on(tp)) { 4313 union tcp_log_stackspecific log; 4314 struct timeval tv; 4315 4316 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4317 #ifdef TCPHPTS 4318 log.u_bbr.inhpts = tcp_in_hpts(tp); 4319 #endif 4320 log.u_bbr.flex8 = val; 4321 log.u_bbr.rttProp = req->timestamp; 4322 log.u_bbr.delRate = req->start; 4323 log.u_bbr.cur_del_rate = req->end; 4324 log.u_bbr.flex1 = req->start_seq; 4325 log.u_bbr.flex2 = req->end_seq; 4326 log.u_bbr.flex3 = req->flags; 4327 log.u_bbr.flex4 = ((req->localtime >> 32) & 0x00000000ffffffff); 4328 log.u_bbr.flex5 = (req->localtime & 0x00000000ffffffff); 4329 log.u_bbr.flex7 = slot; 4330 log.u_bbr.bw_inuse = offset; 4331 /* nbytes = flex6 | epoch */ 4332 log.u_bbr.flex6 = ((nbytes >> 32) & 0x00000000ffffffff); 4333 log.u_bbr.epoch = (nbytes & 0x00000000ffffffff); 4334 /* cspr = lt_epoch | pkts_out */ 4335 log.u_bbr.lt_epoch = ((req->cspr >> 32) & 0x00000000ffffffff); 4336 log.u_bbr.pkts_out |= (req->cspr & 0x00000000ffffffff); 4337 log.u_bbr.applimited = tp->t_tcpreq_closed; 4338 log.u_bbr.applimited <<= 8; 4339 log.u_bbr.applimited |= tp->t_tcpreq_open; 4340 log.u_bbr.applimited <<= 8; 4341 log.u_bbr.applimited |= tp->t_tcpreq_req; 4342 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4343 TCP_LOG_EVENTP(tp, NULL, 4344 &tptosocket(tp)->so_rcv, 4345 &tptosocket(tp)->so_snd, 4346 TCP_LOG_REQ_T, 0, 4347 0, &log, false, &tv); 4348 } 4349 } 4350 4351 void 4352 tcp_req_free_a_slot(struct tcpcb *tp, struct tcp_sendfile_track *ent) 4353 { 4354 if (tp->t_tcpreq_req > 0) 4355 tp->t_tcpreq_req--; 4356 if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) { 4357 if (tp->t_tcpreq_open > 0) 4358 tp->t_tcpreq_open--; 4359 } else { 4360 if (tp->t_tcpreq_closed > 0) 4361 tp->t_tcpreq_closed--; 4362 } 4363 ent->flags = TCP_TRK_TRACK_FLG_EMPTY; 4364 } 4365 4366 static void 4367 tcp_req_check_for_stale_entries(struct tcpcb *tp, uint64_t ts, int rm_oldest) 4368 { 4369 struct tcp_sendfile_track *ent; 4370 uint64_t time_delta, oldest_delta; 4371 int i, oldest, oldest_set = 0, cnt_rm = 0; 4372 4373 for(i = 0; i < MAX_TCP_TRK_REQ; i++) { 4374 ent = &tp->t_tcpreq_info[i]; 4375 if (ent->flags != TCP_TRK_TRACK_FLG_USED) { 4376 /* 4377 * We only care about closed end ranges 4378 * that are allocated and have no sendfile 
static void
tcp_req_check_for_stale_entries(struct tcpcb *tp, uint64_t ts, int rm_oldest)
{
	struct tcp_sendfile_track *ent;
	uint64_t time_delta, oldest_delta;
	int i, oldest, oldest_set = 0, cnt_rm = 0;

	for (i = 0; i < MAX_TCP_TRK_REQ; i++) {
		ent = &tp->t_tcpreq_info[i];
		if (ent->flags != TCP_TRK_TRACK_FLG_USED) {
			/*
			 * We only care about closed-end ranges
			 * that are allocated and have never been
			 * touched by a sendfile.  Those are in
			 * state USED.
			 */
			continue;
		}
		if (ts >= ent->localtime)
			time_delta = ts - ent->localtime;
		else
			time_delta = 0;
		/* Test oldest_set first so oldest_delta is never read unset. */
		if (time_delta &&
		    ((oldest_set == 0) || (oldest_delta < time_delta))) {
			oldest_set = 1;
			oldest = i;
			oldest_delta = time_delta;
		}
		if (tcp_stale_entry_time && (time_delta >= tcp_stale_entry_time)) {
			/*
			 * No sendfile within our time limit;
			 * time to purge it.
			 */
			cnt_rm++;
			tcp_req_log_req_info(tp, &tp->t_tcpreq_info[i], i,
			    TCP_TRK_REQ_LOG_STALE, time_delta, 0);
			tcp_req_free_a_slot(tp, ent);
		}
	}
	if ((cnt_rm == 0) && rm_oldest && oldest_set) {
		/*
		 * Log and free the oldest entry.  Index with "oldest",
		 * not the loop variable, which has run past the end of
		 * the array by this point.
		 */
		ent = &tp->t_tcpreq_info[oldest];
		tcp_req_log_req_info(tp, ent, oldest, TCP_TRK_REQ_LOG_STALE,
		    oldest_delta, 1);
		tcp_req_free_a_slot(tp, ent);
	}
}

int
tcp_req_check_for_comp(struct tcpcb *tp, tcp_seq ack_point)
{
	int i, ret = 0;
	struct tcp_sendfile_track *ent;

	/* Clean up any old closed-end requests that are now completed. */
	if (tp->t_tcpreq_req == 0)
		return (0);
	if (tp->t_tcpreq_closed == 0)
		return (0);
	for (i = 0; i < MAX_TCP_TRK_REQ; i++) {
		ent = &tp->t_tcpreq_info[i];
		/* Skip empty ones */
		if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
			continue;
		/* Skip open ones */
		if (ent->flags & TCP_TRK_TRACK_FLG_OPEN)
			continue;
		if (SEQ_GEQ(ack_point, ent->end_seq)) {
			/* We are past it -- free it */
			tcp_req_log_req_info(tp, ent,
			    i, TCP_TRK_REQ_LOG_FREED, 0, 0);
			tcp_req_free_a_slot(tp, ent);
			ret++;
		}
	}
	return (ret);
}

int
tcp_req_is_entry_comp(struct tcpcb *tp, struct tcp_sendfile_track *ent, tcp_seq ack_point)
{
	if (tp->t_tcpreq_req == 0)
		return (-1);
	if (tp->t_tcpreq_closed == 0)
		return (-1);
	if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
		return (-1);
	if (SEQ_GEQ(ack_point, ent->end_seq)) {
		return (1);
	}
	return (0);
}
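/*
 * Worked example (illustrative): SEQ_GEQ() above is a modular,
 * wrap-safe comparison, so completion is detected correctly even
 * across a 32-bit sequence-number wrap:
 *
 *	end_seq = 0xfffffff0, ack_point = 0x00000010
 *	SEQ_GEQ(0x00000010, 0xfffffff0) is true, because
 *	(int32_t)(0x00000010 - 0xfffffff0) = 0x20 > 0.
 */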
4465 */ 4466 struct tcp_sendfile_track *ent; 4467 int i; 4468 4469 if (tp->t_tcpreq_req == 0) { 4470 /* none open */ 4471 return (NULL); 4472 } 4473 for(i = 0; i < MAX_TCP_TRK_REQ; i++) { 4474 ent = &tp->t_tcpreq_info[i]; 4475 if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) 4476 continue; 4477 if ((ent->flags & TCP_TRK_TRACK_FLG_OPEN) == 0) { 4478 if (SEQ_GEQ(th_ack, ent->end_seq)) { 4479 *ip = i; 4480 return (ent); 4481 } 4482 } 4483 } 4484 return (NULL); 4485 } 4486 4487 struct tcp_sendfile_track * 4488 tcp_req_find_req_for_seq(struct tcpcb *tp, tcp_seq seq) 4489 { 4490 struct tcp_sendfile_track *ent; 4491 int i; 4492 4493 if (tp->t_tcpreq_req == 0) { 4494 /* none open */ 4495 return (NULL); 4496 } 4497 for(i = 0; i < MAX_TCP_TRK_REQ; i++) { 4498 ent = &tp->t_tcpreq_info[i]; 4499 tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_SEARCH, 4500 (uint64_t)seq, 0); 4501 if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) { 4502 continue; 4503 } 4504 if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) { 4505 /* 4506 * An open end request only needs to 4507 * match the beginning seq or be 4508 * all we have (once we keep going on 4509 * a open end request we may have a seq 4510 * wrap). 4511 */ 4512 if ((SEQ_GEQ(seq, ent->start_seq)) || 4513 (tp->t_tcpreq_closed == 0)) 4514 return (ent); 4515 } else { 4516 /* 4517 * For this one we need to 4518 * be a bit more careful if its 4519 * completed at least. 4520 */ 4521 if ((SEQ_GEQ(seq, ent->start_seq)) && 4522 (SEQ_LT(seq, ent->end_seq))) { 4523 return (ent); 4524 } 4525 } 4526 } 4527 return (NULL); 4528 } 4529 4530 /* Should this be in its own file tcp_req.c ? */ 4531 struct tcp_sendfile_track * 4532 tcp_req_alloc_req_full(struct tcpcb *tp, struct tcp_snd_req *req, uint64_t ts, int rec_dups) 4533 { 4534 struct tcp_sendfile_track *fil; 4535 int i, allocated; 4536 4537 /* In case the stack does not check for completions do so now */ 4538 tcp_req_check_for_comp(tp, tp->snd_una); 4539 /* Check for stale entries */ 4540 if (tp->t_tcpreq_req) 4541 tcp_req_check_for_stale_entries(tp, ts, 4542 (tp->t_tcpreq_req >= MAX_TCP_TRK_REQ)); 4543 /* Check to see if this is a duplicate of one not started */ 4544 if (tp->t_tcpreq_req) { 4545 for(i = 0, allocated = 0; i < MAX_TCP_TRK_REQ; i++) { 4546 fil = &tp->t_tcpreq_info[i]; 4547 if (fil->flags != TCP_TRK_TRACK_FLG_USED) 4548 continue; 4549 if ((fil->timestamp == req->timestamp) && 4550 (fil->start == req->start) && 4551 ((fil->flags & TCP_TRK_TRACK_FLG_OPEN) || 4552 (fil->end == req->end))) { 4553 /* 4554 * We already have this request 4555 * and it has not been started with sendfile. 4556 * This probably means the user was returned 4557 * a 4xx of some sort and its going to age 4558 * out, lets not duplicate it. 
4559 */ 4560 return(fil); 4561 } 4562 } 4563 } 4564 /* Ok if there is no room at the inn we are in trouble */ 4565 if (tp->t_tcpreq_req >= MAX_TCP_TRK_REQ) { 4566 tcp_trace_point(tp, TCP_TP_REQ_LOG_FAIL); 4567 for(i = 0; i < MAX_TCP_TRK_REQ; i++) { 4568 tcp_req_log_req_info(tp, &tp->t_tcpreq_info[i], 4569 i, TCP_TRK_REQ_LOG_ALLOCFAIL, 0, 0); 4570 } 4571 return (NULL); 4572 } 4573 for(i = 0, allocated = 0; i < MAX_TCP_TRK_REQ; i++) { 4574 fil = &tp->t_tcpreq_info[i]; 4575 if (fil->flags == TCP_TRK_TRACK_FLG_EMPTY) { 4576 allocated = 1; 4577 fil->flags = TCP_TRK_TRACK_FLG_USED; 4578 fil->timestamp = req->timestamp; 4579 fil->localtime = ts; 4580 fil->start = req->start; 4581 if (req->flags & TCP_LOG_HTTPD_RANGE_END) { 4582 fil->end = req->end; 4583 } else { 4584 fil->end = 0; 4585 fil->flags |= TCP_TRK_TRACK_FLG_OPEN; 4586 } 4587 /* 4588 * We can set the min boundaries to the TCP Sequence space, 4589 * but it might be found to be further up when sendfile 4590 * actually runs on this range (if it ever does). 4591 */ 4592 fil->sbcc_at_s = tptosocket(tp)->so_snd.sb_ccc; 4593 fil->start_seq = tp->snd_una + 4594 tptosocket(tp)->so_snd.sb_ccc; 4595 fil->end_seq = (fil->start_seq + ((uint32_t)(fil->end - fil->start))); 4596 if (tptosocket(tp)->so_snd.sb_tls_info) { 4597 /* 4598 * This session is doing TLS. Take a swag guess 4599 * at the overhead. 4600 */ 4601 fil->end_seq += tcp_estimate_tls_overhead( 4602 tptosocket(tp), (fil->end - fil->start)); 4603 } 4604 tp->t_tcpreq_req++; 4605 if (fil->flags & TCP_TRK_TRACK_FLG_OPEN) 4606 tp->t_tcpreq_open++; 4607 else 4608 tp->t_tcpreq_closed++; 4609 tcp_req_log_req_info(tp, fil, i, 4610 TCP_TRK_REQ_LOG_NEW, 0, 0); 4611 break; 4612 } else 4613 fil = NULL; 4614 } 4615 return (fil); 4616 } 4617 4618 void 4619 tcp_req_alloc_req(struct tcpcb *tp, union tcp_log_userdata *user, uint64_t ts) 4620 { 4621 (void)tcp_req_alloc_req_full(tp, &user->tcp_req, ts, 1); 4622 } 4623 #endif 4624 4625 void 4626 tcp_log_socket_option(struct tcpcb *tp, uint32_t option_num, uint32_t option_val, int err) 4627 { 4628 if (tcp_bblogging_on(tp)) { 4629 struct tcp_log_buffer *l; 4630 4631 l = tcp_log_event(tp, NULL, 4632 &tptosocket(tp)->so_rcv, 4633 &tptosocket(tp)->so_snd, 4634 TCP_LOG_SOCKET_OPT, 4635 err, 0, NULL, 1, 4636 NULL, NULL, 0, NULL); 4637 if (l) { 4638 l->tlb_flex1 = option_num; 4639 l->tlb_flex2 = option_val; 4640 } 4641 } 4642 } 4643 4644 uint32_t 4645 tcp_get_srtt(struct tcpcb *tp, int granularity) 4646 { 4647 uint32_t srtt; 4648 4649 KASSERT(granularity == TCP_TMR_GRANULARITY_USEC || 4650 granularity == TCP_TMR_GRANULARITY_TICKS, 4651 ("%s: called with unexpected granularity %d", __func__, 4652 granularity)); 4653 4654 srtt = tp->t_srtt; 4655 4656 /* 4657 * We only support two granularities. If the stored granularity 4658 * does not match the granularity requested by the caller, 4659 * convert the stored value to the requested unit of granularity. 4660 */ 4661 if (tp->t_tmr_granularity != granularity) { 4662 if (granularity == TCP_TMR_GRANULARITY_USEC) 4663 srtt = TICKS_2_USEC(srtt); 4664 else 4665 srtt = USEC_2_TICKS(srtt); 4666 } 4667 4668 /* 4669 * If the srtt is stored with ticks granularity, we need to 4670 * unshift to get the actual value. We do this after the 4671 * conversion above (if one was necessary) in order to maximize 4672 * precision. 
4673 */ 4674 if (tp->t_tmr_granularity == TCP_TMR_GRANULARITY_TICKS) 4675 srtt = srtt >> TCP_RTT_SHIFT; 4676 4677 return (srtt); 4678 } 4679 4680 void 4681 tcp_account_for_send(struct tcpcb *tp, uint32_t len, uint8_t is_rxt, 4682 uint8_t is_tlp, bool hw_tls) 4683 { 4684 4685 if (is_tlp) { 4686 tp->t_sndtlppack++; 4687 tp->t_sndtlpbyte += len; 4688 } 4689 /* To get total bytes sent you must add t_snd_rxt_bytes to t_sndbytes */ 4690 if (is_rxt) 4691 tp->t_snd_rxt_bytes += len; 4692 else 4693 tp->t_sndbytes += len; 4694 4695 #ifdef KERN_TLS 4696 if (hw_tls && is_rxt && len != 0) { 4697 uint64_t rexmit_percent; 4698 4699 rexmit_percent = (1000ULL * tp->t_snd_rxt_bytes) / 4700 (10ULL * (tp->t_snd_rxt_bytes + tp->t_sndbytes)); 4701 if (rexmit_percent > ktls_ifnet_max_rexmit_pct) 4702 ktls_disable_ifnet(tp); 4703 } 4704 #endif 4705 } 4706