/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kern_tls.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/arb.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/khelp.h>
#endif
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/qmath.h>
#include <sys/stats.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/refcount.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif

#include <netinet/tcp.h>
#ifdef INVARIANTS
#define TCPSTATES
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>
#include <crypto/siphash/siphash.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
#ifdef INET6
VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
#endif

#ifdef NETFLIX_EXP_DETECTION
/* SACK attack detection thresholds and such */
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Sack Attack detection thresholds");
int32_t tcp_force_detection = 0;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection,
    CTLFLAG_RW,
    &tcp_force_detection, 0,
    "Do we force detection even if the INP has it off?");
int32_t tcp_sack_to_ack_thresh = 700;	/* 70 % */
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh,
    CTLFLAG_RW,
    &tcp_sack_to_ack_thresh, 700,
    "Percentage of SACKs to ACKs we must see above (10.1 percent is 101)");
int32_t tcp_sack_to_move_thresh = 600;	/* 60 % */
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh,
    CTLFLAG_RW,
    &tcp_sack_to_move_thresh, 600,
    "Percentage of SACK moves we must see above (10.1 percent is 101)");
int32_t tcp_restoral_thresh = 650;	/* 65 % (sack:2:ack -5%) */
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh,
    CTLFLAG_RW,
    &tcp_restoral_thresh, 550,
    "Percentage of SACK to ACK percentage we must see below to restore (10.1 percent is 101)");
int32_t tcp_sad_decay_val = 800;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per,
    CTLFLAG_RW,
    &tcp_sad_decay_val, 800,
    "The decay percentage (10.1 percent equals 101)");
int32_t tcp_map_minimum = 500;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps,
    CTLFLAG_RW,
    &tcp_map_minimum, 500,
    "Number of map entries before we start detection");
int32_t tcp_attack_on_turns_on_logging = 0;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, attacks_logged,
    CTLFLAG_RW,
    &tcp_attack_on_turns_on_logging, 0,
    "When we have a positive hit on attack, do we turn on logging?");
int32_t tcp_sad_pacing_interval = 2000;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int,
    CTLFLAG_RW,
    &tcp_sad_pacing_interval, 2000,
    "What is the minimum pacing interval for a classified attacker?");

int32_t tcp_sad_low_pps = 100;
SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps,
    CTLFLAG_RW,
    &tcp_sad_low_pps, 100,
    "What is the input pps below which we do not decay?");
#endif
uint32_t tcp_ack_war_time_window = 1000;
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_timewindow,
    CTLFLAG_RW,
    &tcp_ack_war_time_window, 1000,
    "If the tcp_stack does ack-war prevention, how many milliseconds are in its time window?");
uint32_t tcp_ack_war_cnt = 5;
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_cnt,
    CTLFLAG_RW,
    &tcp_ack_war_cnt, 5,
    "If the tcp_stack does ack-war prevention, how many ACKs can be sent in its time window?");
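
/*
 * Illustrative sketch (not code used here): a stack doing ack-war
 * prevention would count challenge ACKs and compare them against the
 * two knobs above, e.g.:
 *
 *	if (tp->t_challenge_ack_end < ticks) {
 *		tp->t_challenge_ack_end = ticks +
 *		    tcp_ack_war_time_window * hz / 1000;
 *		tp->t_challenge_ack_cnt = 0;
 *	}
 *	if (++tp->t_challenge_ack_cnt > tcp_ack_war_cnt)
 *		return;		// suppress this ACK
 *
 * The t_challenge_ack_* fields are hypothetical names for this sketch;
 * individual stacks keep their own state.
 */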

struct rwlock tcp_function_lock;

static int
sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I",
    "Default TCP Maximum Segment Size");

#ifdef INET6
static int
sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_v6mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_v6mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
    "Default TCP Maximum Segment Size for IPv6");
#endif /* INET6 */

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_minmss), 0,
    "Minimum TCP Maximum Segment Size");

VNET_DEFINE(int, tcp_do_rfc1323) = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc1323), 0,
    "Enable rfc1323 (high performance TCP) extensions");

/*
 * As of June 2021, several TCP stacks violate RFC 7323 from September 2014.
 * Some stacks negotiate TS, but never send them after connection setup.  Some
 * stacks negotiate TS, but don't send them when sending keep-alive segments.
 * These include modern widely deployed TCP stacks.
 * Therefore tolerating violations for now...
 */
VNET_DEFINE(int, tcp_tolerate_missing_ts) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tolerate_missing_ts, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_tolerate_missing_ts), 0,
    "Tolerate missing TCP timestamps");

VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ts_offset_per_conn), 0,
    "Initialize TCP timestamps per connection instead of per host pair");

/* How many connections are pacing */
static volatile uint32_t number_of_tcp_connections_pacing = 0;
static uint32_t shadow_num_connections = 0;

static int tcp_pacing_limit = 10000;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pacing_limit, CTLFLAG_RW,
    &tcp_pacing_limit, 1000,
    "If the TCP stack does pacing, is there a limit (-1 = no, 0 = no pacing, N = number of connections)");

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pacing_count, CTLFLAG_RD,
    &shadow_num_connections, 0, "Number of TCP connections being paced");

static int tcp_log_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
    &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");

static int tcp_tcbhashsize;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");

VNET_DEFINE_STATIC(int, icmp_may_rst) = 1;
#define	V_icmp_may_rst		VNET(icmp_may_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(icmp_may_rst), 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0;
#define	V_tcp_isn_reseed_interval	VNET(tcp_isn_reseed_interval)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_isn_reseed_interval), 0,
    "Seconds between reseeding of ISN secret");

static int tcp_soreceive_stream;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
    &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");

VNET_DEFINE(uma_zone_t, sack_hole_zone);
#define	V_sack_hole_zone	VNET(sack_hole_zone)
VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0;	/* unlimited */
static int
sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = V_tcp_map_entries_limit;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		/* only allow "0" and value > minimum */
		if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT)
			error = EINVAL;
		else
			V_tcp_map_entries_limit = new;
	}
	return (error);
}
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &VNET_NAME(tcp_map_entries_limit), 0,
    &sysctl_net_inet_tcp_map_limit_check, "IU",
    "Total sendmap entries limit");

VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0;	/* unlimited */
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_map_split_limit), 0,
    "Total sendmap split entries limit");
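
/*
 * Sketch of how a pacing-capable stack is expected to honor the
 * tcp_pacing_limit knob above (hypothetical helper, for illustration
 * only): atomically bump number_of_tcp_connections_pacing and refuse
 * to pace once the count passes the limit, unless the limit is -1
 * (unlimited).
 *
 *	static bool
 *	can_start_pacing(void)
 *	{
 *		uint32_t cur;
 *
 *		cur = atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1);
 *		if (tcp_pacing_limit >= 0 &&
 *		    cur >= (uint32_t)tcp_pacing_limit) {
 *			atomic_subtract_int(&number_of_tcp_connections_pacing, 1);
 *			return (false);
 *		}
 *		shadow_num_connections = number_of_tcp_connections_pacing;
 *		return (true);
 *	}
 */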

#ifdef TCP_HHOOK
VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST + 1]);
#endif

#define	TS_OFFSET_SECRET_LENGTH	SIPHASH_KEY_LENGTH
VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]);
#define	V_ts_offset_secret	VNET(ts_offset_secret)

static int	tcp_default_fb_init(struct tcpcb *tp);
static void	tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged);
static int	tcp_default_handoff_ok(struct tcpcb *tp);
static struct inpcb *tcp_notify(struct inpcb *, int);
static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
static struct inpcb *tcp_mtudisc(struct inpcb *, int);
static char *	tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
		    const void *ip4hdr, const void *ip6hdr);

static struct tcp_function_block tcp_def_funcblk = {
	.tfb_tcp_block_name = "freebsd",
	.tfb_tcp_output = tcp_default_output,
	.tfb_tcp_do_segment = tcp_do_segment,
	.tfb_tcp_ctloutput = tcp_default_ctloutput,
	.tfb_tcp_handoff_ok = tcp_default_handoff_ok,
	.tfb_tcp_fb_init = tcp_default_fb_init,
	.tfb_tcp_fb_fini = tcp_default_fb_fini,
};

static int tcp_fb_cnt = 0;
struct tcp_funchead t_functions;
static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk;

void
tcp_record_dsack(struct tcpcb *tp, tcp_seq start, tcp_seq end, int tlp)
{
	TCPSTAT_INC(tcps_dsack_count);
	tp->t_dsack_pack++;
	if (tlp == 0) {
		if (SEQ_GT(end, start)) {
			tp->t_dsack_bytes += (end - start);
			TCPSTAT_ADD(tcps_dsack_bytes, (end - start));
		} else {
			tp->t_dsack_tlp_bytes += (start - end);
			TCPSTAT_ADD(tcps_dsack_bytes, (start - end));
		}
	} else {
		if (SEQ_GT(end, start)) {
			tp->t_dsack_bytes += (end - start);
			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (end - start));
		} else {
			tp->t_dsack_tlp_bytes += (start - end);
			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (start - end));
		}
	}
}

static struct tcp_function_block *
find_tcp_functions_locked(struct tcp_function_set *fs)
{
	struct tcp_function *f;
	struct tcp_function_block *blk = NULL;

	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (strcmp(f->tf_name, fs->function_set_name) == 0) {
			blk = f->tf_fb;
			break;
		}
	}
	return (blk);
}

static struct tcp_function_block *
find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
{
	struct tcp_function_block *rblk = NULL;
	struct tcp_function *f;

	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (f->tf_fb == blk) {
			rblk = blk;
			if (s) {
				*s = f;
			}
			break;
		}
	}
	return (rblk);
}

struct tcp_function_block *
find_and_ref_tcp_functions(struct tcp_function_set *fs)
{
	struct tcp_function_block *blk;

	rw_rlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(fs);
	if (blk)
		refcount_acquire(&blk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (blk);
}

struct tcp_function_block *
find_and_ref_tcp_fb(struct tcp_function_block *blk)
{
	struct tcp_function_block *rblk;

	rw_rlock(&tcp_function_lock);
	rblk = find_tcp_fb_locked(blk, NULL);
	if (rblk)
		refcount_acquire(&rblk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (rblk);
}
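
/*
 * Usage note (illustrative): callers pair every successful
 * find_and_ref_tcp_functions()/find_and_ref_tcp_fb() with a
 * refcount_release(&blk->tfb_refcnt) once they are done with the
 * block, e.g.:
 *
 *	blk = find_and_ref_tcp_functions(&fs);
 *	if (blk != NULL) {
 *		... use blk ...
 *		refcount_release(&blk->tfb_refcnt);
 *	}
 *
 * tcp_switch_back_to_default() below follows exactly this pattern.
 */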

/* Find a matching alias for the given tcp_function_block. */
int
find_tcp_function_alias(struct tcp_function_block *blk,
    struct tcp_function_set *fs)
{
	struct tcp_function *f;
	int found;

	found = 0;
	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if ((f->tf_fb == blk) &&
		    (strncmp(f->tf_name, blk->tfb_tcp_block_name,
		     TCP_FUNCTION_NAME_LEN_MAX) != 0)) {
			/* Matching function block with different name. */
			strncpy(fs->function_set_name, f->tf_name,
			    TCP_FUNCTION_NAME_LEN_MAX);
			found = 1;
			break;
		}
	}
	/* Null terminate the string appropriately. */
	if (found) {
		fs->function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
	} else {
		fs->function_set_name[0] = '\0';
	}
	rw_runlock(&tcp_function_lock);
	return (found);
}

static struct tcp_function_block *
find_and_ref_tcp_default_fb(void)
{
	struct tcp_function_block *rblk;

	rw_rlock(&tcp_function_lock);
	rblk = tcp_func_set_ptr;
	refcount_acquire(&rblk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (rblk);
}

void
tcp_switch_back_to_default(struct tcpcb *tp)
{
	struct tcp_function_block *tfb;

	KASSERT(tp->t_fb != &tcp_def_funcblk,
	    ("%s: called by the built-in default stack", __func__));

	/*
	 * Release the old stack.  This function will either find a new one
	 * or panic.
	 */
	if (tp->t_fb->tfb_tcp_fb_fini != NULL)
		(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
	refcount_release(&tp->t_fb->tfb_refcnt);

	/*
	 * Now, we'll find a new function block to use.
	 * Start by trying the current user-selected
	 * default, unless this stack is the user-selected
	 * default.
	 */
	tfb = find_and_ref_tcp_default_fb();
	if (tfb == tp->t_fb) {
		refcount_release(&tfb->tfb_refcnt);
		tfb = NULL;
	}
	/* Does the stack accept this connection? */
	if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL &&
	    (*tfb->tfb_tcp_handoff_ok)(tp)) {
		refcount_release(&tfb->tfb_refcnt);
		tfb = NULL;
	}
	/* Try to use that stack. */
	if (tfb != NULL) {
		/* Initialize the new stack.  If it succeeds, we are done. */
		tp->t_fb = tfb;
		if (tp->t_fb->tfb_tcp_fb_init == NULL ||
		    (*tp->t_fb->tfb_tcp_fb_init)(tp) == 0)
			return;

		/*
		 * Initialization failed.  Release the reference count on
		 * the stack.
		 */
		refcount_release(&tfb->tfb_refcnt);
	}

	/*
	 * If that wasn't feasible, use the built-in default
	 * stack which is not allowed to reject anyone.
	 */
	tfb = find_and_ref_tcp_fb(&tcp_def_funcblk);
	if (tfb == NULL) {
		/* there always should be a default */
		panic("Can't refer to tcp_def_funcblk");
	}
	if (tfb->tfb_tcp_handoff_ok != NULL) {
		if ((*tfb->tfb_tcp_handoff_ok)(tp)) {
			/* The default stack cannot say no */
			panic("Default stack rejects a new session?");
		}
	}
	tp->t_fb = tfb;
	if (tp->t_fb->tfb_tcp_fb_init != NULL &&
	    (*tp->t_fb->tfb_tcp_fb_init)(tp)) {
		/* The default stack cannot fail */
		panic("Default stack initialization failed");
	}
}

static bool
tcp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa, void *ctx)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct udphdr *uh;
	struct tcphdr *th;
	int thlen;
	uint16_t port;

	TCPSTAT_INC(tcps_tunneled_pkts);
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		TCPSTAT_INC(tcps_tunneled_errs);
		goto out;
	}
	thlen = sizeof(struct tcphdr);
	if (m->m_len < off + sizeof(struct udphdr) + thlen &&
	    (m = m_pullup(m, off + sizeof(struct udphdr) + thlen)) == NULL) {
		TCPSTAT_INC(tcps_tunneled_errs);
		goto out;
	}
	iph = mtod(m, struct ip *);
	uh = (struct udphdr *)((caddr_t)iph + off);
	th = (struct tcphdr *)(uh + 1);
	thlen = th->th_off << 2;
	if (m->m_len < off + sizeof(struct udphdr) + thlen) {
		m = m_pullup(m, off + sizeof(struct udphdr) + thlen);
		if (m == NULL) {
			TCPSTAT_INC(tcps_tunneled_errs);
			goto out;
		} else {
			iph = mtod(m, struct ip *);
			uh = (struct udphdr *)((caddr_t)iph + off);
			th = (struct tcphdr *)(uh + 1);
		}
	}
	m->m_pkthdr.tcp_tun_port = port = uh->uh_sport;
	bcopy(th, uh, m->m_len - off);
	m->m_len -= sizeof(struct udphdr);
	m->m_pkthdr.len -= sizeof(struct udphdr);
	/*
	 * We use the same algorithm for
	 * both UDP and TCP for c-sum.  So
	 * the code in tcp_input will skip
	 * the checksum.  So we do nothing
	 * with the flag (m->m_pkthdr.csum_flags).
	 */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		tcp_input_with_port(&m, &off, IPPROTO_TCP, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		tcp6_input_with_port(&m, &off, IPPROTO_TCP, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return (true);
out:
	m_freem(m);

	return (true);
}

static int
sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
{
	int error = ENOENT;
	struct tcp_function_set fs;
	struct tcp_function_block *blk;

	memset(&fs, 0, sizeof(fs));
	rw_rlock(&tcp_function_lock);
	blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL);
	if (blk) {
		/* Found him */
		strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
		fs.pcbcnt = blk->tfb_refcnt;
	}
	rw_runlock(&tcp_function_lock);
	error = sysctl_handle_string(oidp, fs.function_set_name,
	    sizeof(fs.function_set_name), req);

	/* Check for error or no change */
	if (error != 0 || req->newptr == NULL)
		return (error);

	rw_wlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(&fs);
	if ((blk == NULL) ||
	    (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
		error = ENOENT;
		goto done;
	}
	tcp_func_set_ptr = blk;
done:
	rw_wunlock(&tcp_function_lock);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
    "Set/get the default TCP functions");

static int
sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
{
	int error, cnt, linesz;
	struct tcp_function *f;
	char *buffer, *cp;
	size_t bufsz, outsz;
	bool alias;

	cnt = 0;
	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		cnt++;
	}
	rw_runlock(&tcp_function_lock);

	bufsz = (cnt + 2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
	buffer = malloc(bufsz, M_TEMP, M_WAITOK);

	error = 0;
	cp = buffer;

	linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
	    "Alias", "PCB count");
	cp += linesz;
	bufsz -= linesz;
	outsz = linesz;

	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
		linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
		    f->tf_fb->tfb_tcp_block_name,
		    (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ',
		    alias ? f->tf_name : "-",
		    f->tf_fb->tfb_refcnt);
		if (linesz >= bufsz) {
			error = EOVERFLOW;
			break;
		}
		cp += linesz;
		bufsz -= linesz;
		outsz += linesz;
	}
	rw_runlock(&tcp_function_lock);
	if (error == 0)
		error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_net_inet_list_available, "A",
    "list available TCP Function sets");

VNET_DEFINE(int, tcp_udp_tunneling_port) = TCP_TUNNELING_PORT_DEFAULT;

#ifdef INET
VNET_DEFINE(struct socket *, udp4_tun_socket) = NULL;
#define	V_udp4_tun_socket	VNET(udp4_tun_socket)
#endif
#ifdef INET6
VNET_DEFINE(struct socket *, udp6_tun_socket) = NULL;
#define	V_udp6_tun_socket	VNET(udp6_tun_socket)
#endif

static void
tcp_over_udp_stop(void)
{
	/*
	 * This function assumes the sysctl caller holds the inp info lock
	 * for writing!
	 */
#ifdef INET
	if (V_udp4_tun_socket != NULL) {
		soclose(V_udp4_tun_socket);
		V_udp4_tun_socket = NULL;
	}
#endif
#ifdef INET6
	if (V_udp6_tun_socket != NULL) {
		soclose(V_udp6_tun_socket);
		V_udp6_tun_socket = NULL;
	}
#endif
}

static int
tcp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds the inp info lock
	 * for writing!
	 */
	port = V_tcp_udp_tunneling_port;
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (V_udp4_tun_socket != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (V_udp6_tun_socket != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &V_udp4_tun_socket,
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(V_udp4_tun_socket,
	    tcp_recv_udp_tunneled_packet,
	    tcp_ctlinput_viaudp,
	    NULL))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(V_udp4_tun_socket,
	    (struct sockaddr *)&sin, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &V_udp6_tun_socket,
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(V_udp6_tun_socket,
	    tcp_recv_udp_tunneled_packet,
	    tcp6_ctlinput_viaudp,
	    NULL))) {
		tcp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(V_udp6_tun_socket,
	    (struct sockaddr *)&sin6, curthread))) {
		tcp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

static int
sysctl_net_inet_tcp_udp_tunneling_port_check(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t old, new;

	old = V_tcp_udp_tunneling_port;
	new = old;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if ((error == 0) &&
	    (req->newptr != NULL)) {
		if ((new < TCP_TUNNELING_PORT_MIN) ||
		    (new > TCP_TUNNELING_PORT_MAX)) {
			error = EINVAL;
		} else {
			V_tcp_udp_tunneling_port = new;
			if (old != 0) {
				tcp_over_udp_stop();
			}
			if (new != 0) {
				error = tcp_over_udp_start();
			}
		}
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_port,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(tcp_udp_tunneling_port),
    0, &sysctl_net_inet_tcp_udp_tunneling_port_check, "IU",
    "Tunneling port for tcp over udp");

VNET_DEFINE(int, tcp_udp_tunneling_overhead) = TCP_TUNNELING_OVERHEAD_DEFAULT;

static int
sysctl_net_inet_tcp_udp_tunneling_overhead_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_udp_tunneling_overhead;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < TCP_TUNNELING_OVERHEAD_MIN) ||
		    (new > TCP_TUNNELING_OVERHEAD_MAX))
			error = EINVAL;
		else
			V_tcp_udp_tunneling_overhead = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_overhead,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(tcp_udp_tunneling_overhead),
    0, &sysctl_net_inet_tcp_udp_tunneling_overhead_check, "IU",
    "MSS reduction when using tcp over udp");
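
/*
 * Administrative example (illustrative; the chosen port must fall in
 * the TCP_TUNNELING_PORT_MIN..TCP_TUNNELING_PORT_MAX range checked
 * above): TCP-over-UDP encapsulation is driven entirely by the two
 * sysctls above, e.g. from the shell:
 *
 *	sysctl net.inet.tcp.udp_tunneling_port=6170
 *	sysctl net.inet.tcp.udp_tunneling_overhead=8
 *
 * Setting the port back to 0 tears the tunnel sockets down via
 * tcp_over_udp_stop().
 */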

/*
 * Exports one (struct tcp_function_info) for each alias/name.
 */
static int
sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
{
	int cnt, error;
	struct tcp_function *f;
	struct tcp_function_info tfi;

	/*
	 * We don't allow writes.
	 */
	if (req->newptr != NULL)
		return (EINVAL);

	/*
	 * Wire the old buffer so we can directly copy the functions to
	 * user space without dropping the lock.
	 */
	if (req->oldptr != NULL) {
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
	}

	/*
	 * Walk the list and copy out matching entries.  If INVARIANTS
	 * is compiled in, also walk the list to verify the length of
	 * the list matches what we have recorded.
	 */
	rw_rlock(&tcp_function_lock);

	cnt = 0;
#ifndef INVARIANTS
	if (req->oldptr == NULL) {
		cnt = tcp_fb_cnt;
		goto skip_loop;
	}
#endif
	TAILQ_FOREACH(f, &t_functions, tf_next) {
#ifdef INVARIANTS
		cnt++;
#endif
		if (req->oldptr != NULL) {
			bzero(&tfi, sizeof(tfi));
			tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
			tfi.tfi_id = f->tf_fb->tfb_id;
			(void)strlcpy(tfi.tfi_alias, f->tf_name,
			    sizeof(tfi.tfi_alias));
			(void)strlcpy(tfi.tfi_name,
			    f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name));
			error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
			/*
			 * Don't stop on error, as that is the
			 * mechanism we use to accumulate length
			 * information if the buffer was too short.
			 */
		}
	}
	KASSERT(cnt == tcp_fb_cnt,
	    ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
#ifndef INVARIANTS
skip_loop:
#endif
	rw_runlock(&tcp_function_lock);
	if (req->oldptr == NULL)
		error = SYSCTL_OUT(req, NULL,
		    (cnt + 1) * sizeof(struct tcp_function_info));

	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
    CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
    "List TCP function block name-to-ID mappings");

/*
 * tfb_tcp_handoff_ok() function for the default stack.
 * Note that we'll basically try to take all comers.
 */
static int
tcp_default_handoff_ok(struct tcpcb *tp)
{

	return (0);
}

/*
 * tfb_tcp_fb_init() function for the default stack.
 *
 * This handles making sure we have appropriate timers set if you are
 * transitioning a socket that has some amount of setup done.
 *
 * The init() function from the default can *never* return non-zero,
 * i.e. it is required to always succeed since it is the stack of last
 * resort!
 */
static int
tcp_default_fb_init(struct tcpcb *tp)
{
	struct socket *so;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
	    ("%s: connection %p in unexpected state %d", __func__, tp,
	    tp->t_state));

	/*
	 * Nothing to do for ESTABLISHED or LISTEN states.  And, we don't
	 * know what to do for unexpected states (which includes TIME_WAIT).
	 */
	if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT)
		return (0);

	/*
	 * Make sure some kind of transmission timer is set if there is
	 * outstanding data.
	 */
	so = tp->t_inpcb->inp_socket;
	if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
	    tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
	    tcp_timer_active(tp, TT_PERSIST))) {
		/*
		 * If the session has been established and it looks like it
		 * should be in the persist state, set the persist timer.
		 * Otherwise, set the retransmit timer.
		 */
		if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 &&
		    (int32_t)(tp->snd_nxt - tp->snd_una) <
		    (int32_t)sbavail(&so->so_snd))
			tcp_setpersist(tp);
		else
			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
	}

	/* All non-embryonic sessions get a keepalive timer. */
	if (!tcp_timer_active(tp, TT_KEEP))
		tcp_timer_activate(tp, TT_KEEP,
		    TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) :
		    TP_KEEPINIT(tp));

	/*
	 * Make sure critical variables are initialized
	 * if transitioning while in Recovery.
	 */
	if (IN_FASTRECOVERY(tp->t_flags)) {
		if (tp->sackhint.recover_fs == 0)
			tp->sackhint.recover_fs = max(1,
			    tp->snd_nxt - tp->snd_una);
	}

	return (0);
}

/*
 * tfb_tcp_fb_fini() function for the default stack.
 *
 * This changes state as necessary (or prudent) to prepare for another stack
 * to assume responsibility for the connection.
 */
static void
tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	return;
}

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	0
#endif

/*
 * XXX
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct tcpcb_mem {
	struct	tcpcb		tcb;
	struct	tcp_timer	tt;
	struct	cc_var		ccv;
#ifdef TCP_HHOOK
	struct	osd		osd;
#endif
};

VNET_DEFINE_STATIC(uma_zone_t, tcpcb_zone);
#define	V_tcpcb_zone	VNET(tcpcb_zone)

MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");

static struct mtx isn_mtx;

#define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
#define	ISN_LOCK()	mtx_lock(&isn_mtx)
#define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)

INPCBSTORAGE_DEFINE(tcpcbstor, "tcpinp", "tcp_inpcb", "tcp", "tcphash");

/*
 * Take a value and get the next power of 2 that doesn't overflow.
 * Used to size the tcp_inpcb hash buckets.
 */
static int
maketcp_hashsize(int size)
{
	int hashsize;

	/*
	 * auto tune.
	 * get the next power of 2 higher than maxsockets.
	 */
	hashsize = 1 << fls(size);
	/* catch overflow, and just go one power of 2 smaller */
	if (hashsize < size) {
		hashsize = 1 << (fls(size) - 1);
	}
	return (hashsize);
}
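
/*
 * Example: maketcp_hashsize(600) computes fls(600) == 10, so the
 * returned size is 1 << 10 == 1024, the next power of 2 above 600.
 * Only on overflow (an input with the top bit set) does it fall back
 * to one power of 2 smaller.
 */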

static volatile int next_tcp_stack_id = 1;

/*
 * Register a TCP function block with the name provided in the names
 * array.  (Note that this function does NOT automatically register
 * blk->tfb_tcp_block_name as a stack name.  Therefore, you should
 * explicitly include blk->tfb_tcp_block_name in the list of names if
 * you wish to register the stack with that name.)
 *
 * Either all name registrations will succeed or all will fail.  If
 * a name registration fails, the function will update the num_names
 * argument to point to the array index of the name that encountered
 * the failure.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
    const char *names[], int *num_names)
{
	struct tcp_function *n;
	struct tcp_function_set fs;
	int error, i;

	KASSERT(names != NULL && *num_names > 0,
	    ("%s: Called with 0-length name list", __func__));
	KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));
	KASSERT(rw_initialized(&tcp_function_lock),
	    ("%s: called too early", __func__));

	if ((blk->tfb_tcp_output == NULL) ||
	    (blk->tfb_tcp_do_segment == NULL) ||
	    (blk->tfb_tcp_ctloutput == NULL) ||
	    (strlen(blk->tfb_tcp_block_name) == 0)) {
		/*
		 * These functions are required and you
		 * need a name.
		 */
		*num_names = 0;
		return (EINVAL);
	}
	if (blk->tfb_tcp_timer_stop_all ||
	    blk->tfb_tcp_timer_activate ||
	    blk->tfb_tcp_timer_active ||
	    blk->tfb_tcp_timer_stop) {
		/*
		 * If you define one timer function you
		 * must have them all.
		 */
		if ((blk->tfb_tcp_timer_stop_all == NULL) ||
		    (blk->tfb_tcp_timer_activate == NULL) ||
		    (blk->tfb_tcp_timer_active == NULL) ||
		    (blk->tfb_tcp_timer_stop == NULL)) {
			*num_names = 0;
			return (EINVAL);
		}
	}

	if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
		*num_names = 0;
		return (EINVAL);
	}

	refcount_init(&blk->tfb_refcnt, 0);
	blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
	for (i = 0; i < *num_names; i++) {
		n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
		if (n == NULL) {
			error = ENOMEM;
			goto cleanup;
		}
		n->tf_fb = blk;

		(void)strlcpy(fs.function_set_name, names[i],
		    sizeof(fs.function_set_name));
		rw_wlock(&tcp_function_lock);
		if (find_tcp_functions_locked(&fs) != NULL) {
			/* Duplicate name space not allowed */
			rw_wunlock(&tcp_function_lock);
			free(n, M_TCPFUNCTIONS);
			error = EALREADY;
			goto cleanup;
		}
		(void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name));
		TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
		tcp_fb_cnt++;
		rw_wunlock(&tcp_function_lock);
	}
	return (0);

cleanup:
	/*
	 * Deregister the names we just added.  Because registration failed
	 * for names[i], we don't need to deregister that name.
	 */
	*num_names = i;
	rw_wlock(&tcp_function_lock);
	while (--i >= 0) {
		TAILQ_FOREACH(n, &t_functions, tf_next) {
			if (!strncmp(n->tf_name, names[i],
			    TCP_FUNCTION_NAME_LEN_MAX)) {
				TAILQ_REMOVE(&t_functions, n, tf_next);
				tcp_fb_cnt--;
				n->tf_fb = NULL;
				free(n, M_TCPFUNCTIONS);
				break;
			}
		}
	}
	rw_wunlock(&tcp_function_lock);
	return (error);
}

/*
 * Register a TCP function block using the name provided in the name
 * argument.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
    int wait)
{
	const char *name_list[1];
	int num_names, rv;

	num_names = 1;
	if (name != NULL)
		name_list[0] = name;
	else
		name_list[0] = blk->tfb_tcp_block_name;
	rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
	return (rv);
}

/*
 * Register a TCP function block using the name defined in
 * blk->tfb_tcp_block_name.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions(struct tcp_function_block *blk, int wait)
{

	return (register_tcp_functions_as_name(blk, NULL, wait));
}
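
/*
 * Illustrative module lifecycle for an alternate stack (a sketch, not
 * a template from this file): a loadable stack typically calls
 * register_tcp_functions() from its MOD_LOAD handler, and in its
 * modevent handler maps
 *
 *	MOD_QUIESCE -> deregister_tcp_functions(blk, true, false)
 *	MOD_UNLOAD  -> deregister_tcp_functions(blk, false, false)
 *
 * so a quiesce probe only checks whether removal would succeed, while
 * the real unload performs it (see the function below).
 */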

/*
 * Deregister all names associated with a function block.  This
 * functionally removes the function block from use within the system.
 *
 * When called with a true quiesce argument, mark the function block
 * as being removed so no more stacks will use it and determine
 * whether the removal would succeed.
 *
 * When called with a false quiesce argument, actually attempt the
 * removal.
 *
 * When called with a force argument, attempt to switch all TCBs to
 * use the default stack instead of returning EBUSY.
 *
 * Returns 0 on success (or if the removal would succeed), or an error
 * code on failure.
 */
int
deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce,
    bool force)
{
	struct tcp_function *f;

	if (blk == &tcp_def_funcblk) {
		/* You can't un-register the default */
		return (EPERM);
	}
	rw_wlock(&tcp_function_lock);
	if (blk == tcp_func_set_ptr) {
		/* You can't free the current default */
		rw_wunlock(&tcp_function_lock);
		return (EBUSY);
	}
	/* Mark the block so no more stacks can use it. */
	blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
	/*
	 * If TCBs are still attached to the stack, attempt to switch them
	 * to the default stack.
	 */
	if (force && blk->tfb_refcnt) {
		struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
		    INPLOOKUP_WLOCKPCB);
		struct inpcb *inp;
		struct tcpcb *tp;
		VNET_ITERATOR_DECL(vnet_iter);

		rw_wunlock(&tcp_function_lock);

		VNET_LIST_RLOCK();
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);
			while ((inp = inp_next(&inpi)) != NULL) {
				if (inp->inp_flags & INP_TIMEWAIT)
					continue;
				tp = intotcpcb(inp);
				if (tp == NULL || tp->t_fb != blk)
					continue;
				tcp_switch_back_to_default(tp);
			}
			CURVNET_RESTORE();
		}
		VNET_LIST_RUNLOCK();

		rw_wlock(&tcp_function_lock);
	}
	if (blk->tfb_refcnt) {
		/* TCBs still attached. */
		rw_wunlock(&tcp_function_lock);
		return (EBUSY);
	}
	if (quiesce) {
		/* Skip removal. */
		rw_wunlock(&tcp_function_lock);
		return (0);
	}
	/* Remove any function names that map to this function block. */
	while (find_tcp_fb_locked(blk, &f) != NULL) {
		TAILQ_REMOVE(&t_functions, f, tf_next);
		tcp_fb_cnt--;
		f->tf_fb = NULL;
		free(f, M_TCPFUNCTIONS);
	}
	rw_wunlock(&tcp_function_lock);
	return (0);
}

static void
tcp_drain(void)
{
	struct epoch_tracker et;
	VNET_ITERATOR_DECL(vnet_iter);

	if (!do_tcpdrain)
		return;

	NET_EPOCH_ENTER(et);
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
		    INPLOOKUP_WLOCKPCB);
		struct inpcb *inpb;
		struct tcpcb *tcpb;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		while ((inpb = inp_next(&inpi)) != NULL) {
			if (inpb->inp_flags & INP_TIMEWAIT)
				continue;
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				tcp_reass_flush(tcpb);
				tcp_clean_sackreport(tcpb);
#ifdef TCP_BLACKBOX
				tcp_log_drain(tcpb);
#endif
#ifdef TCPPCAP
				if (tcp_pcap_aggressive_free) {
					/* Free the TCP PCAP queues. */
					tcp_pcap_drain(&(tcpb->t_inpkts));
					tcp_pcap_drain(&(tcpb->t_outpkts));
				}
#endif
			}
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	NET_EPOCH_EXIT(et);
}

static void
tcp_vnet_init(void *arg __unused)
{

#ifdef TCP_HHOOK
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
	    &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
	    &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
#endif
#ifdef STATS
	if (tcp_stats_init())
		printf("%s: WARNING: unable to initialise TCP stats\n",
		    __func__);
#endif
	in_pcbinfo_init(&V_tcbinfo, &tcpcbstor, tcp_tcbhashsize,
	    tcp_tcbhashsize);

	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcpcb_zone, maxsockets);
	uma_zone_set_warning(V_tcpcb_zone, "kern.ipc.maxsockets limit reached");

	tcp_tw_init();
	syncache_init();
	tcp_hc_init();

	TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
	V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	tcp_fastopen_init();

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);

	V_tcp_msl = TCPTV_MSL;
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH,
    tcp_vnet_init, NULL);
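
/*
 * Note: the tcp_tcbhashsize value consumed by in_pcbinfo_init() above
 * is resolved in tcp_init() below (SI_ORDER_THIRD, so it runs first)
 * from the loader tunable, e.g. in loader.conf (a non-power-of-2 value
 * is clipped):
 *
 *	net.inet.tcp.tcbhashsize="4096"
 */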

static void
tcp_init(void *arg __unused)
{
	const char *tcbhash_tuneable;
	int hashsize;

	tcp_reass_global_init();

	/* XXX virtualize those below? */
	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_rexmit_initial = TCPTV_RTOBASE;
	if (tcp_rexmit_initial < 1)
		tcp_rexmit_initial = 1;
	tcp_rexmit_min = TCPTV_MIN;
	if (tcp_rexmit_min < 1)
		tcp_rexmit_min = 1;
	tcp_persmin = TCPTV_PERSMIN;
	tcp_persmax = TCPTV_PERSMAX;
	tcp_rexmit_slop = TCPTV_CPU_VAR;
	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;

	/* Setup the tcp function block list */
	TAILQ_INIT(&t_functions);
	rw_init(&tcp_function_lock, "tcp_func_lock");
	register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
#ifdef TCP_BLACKBOX
	/* Initialize the TCP logging data. */
	tcp_log_init();
#endif
	arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0);

	if (tcp_soreceive_stream) {
#ifdef INET
		tcp_protosw.pr_soreceive = soreceive_stream;
#endif
#ifdef INET6
		tcp6_protosw.pr_soreceive = soreceive_stream;
#endif /* INET6 */
	}

#ifdef INET6
#define	TCP_MINPROTOHDR	(sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define	TCP_MINPROTOHDR	(sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	ISN_LOCK_INIT();
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(vm_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(mbuf_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);

	tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
	tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
	tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
	tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
	tcp_extra_mbuf = counter_u64_alloc(M_WAITOK);
	tcp_would_have_but = counter_u64_alloc(M_WAITOK);
	tcp_comp_total = counter_u64_alloc(M_WAITOK);
	tcp_uncomp_total = counter_u64_alloc(M_WAITOK);
	tcp_bad_csums = counter_u64_alloc(M_WAITOK);
#ifdef TCPPCAP
	tcp_pcap_init();
#endif

	hashsize = TCBHASHSIZE;
	tcbhash_tuneable = "net.inet.tcp.tcbhashsize";
	TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
	if (hashsize == 0) {
		/*
		 * Auto tune the hash size based on maxsockets.
		 * A perfect hash would have a 1:1 mapping
		 * (hashsize = maxsockets) however it's been
		 * suggested that O(2) average is better.
		 */
		hashsize = maketcp_hashsize(maxsockets / 4);
		/*
		 * Our historical default is 512,
		 * do not autotune lower than this.
		 */
		if (hashsize < 512)
			hashsize = 512;
		if (bootverbose)
			printf("%s: %s auto tuned to %d\n", __func__,
			    tcbhash_tuneable, hashsize);
	}
	/*
	 * We require a hashsize to be a power of two.
	 * Previously if it was not a power of two we would just reset it
	 * back to 512, which could be a nasty surprise if you did not notice
	 * the error message.
	 * Instead what we do is clip it via maketcp_hashsize() to the
	 * closest power of two (rounding up) and warn about it.
	 */
	if (!powerof2(hashsize)) {
		int oldhashsize = hashsize;

		hashsize = maketcp_hashsize(hashsize);
		/* prevent absurdly low value */
		if (hashsize < 16)
			hashsize = 16;
		printf("%s: WARNING: TCB hash size not a power of 2, "
		    "clipped from %d to %d.\n", __func__, oldhashsize,
		    hashsize);
	}
	tcp_tcbhashsize = hashsize;

#ifdef INET
	IPPROTO_REGISTER(IPPROTO_TCP, tcp_input, tcp_ctlinput);
#endif
#ifdef INET6
	IP6PROTO_REGISTER(IPPROTO_TCP, tcp6_input, tcp6_ctlinput);
#endif
}
SYSINIT(tcp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, tcp_init, NULL);

#ifdef VIMAGE
static void
tcp_destroy(void *unused __unused)
{
	int n;
#ifdef TCP_HHOOK
	int error;
#endif

	/*
	 * All our processes are gone, all our sockets should be cleaned
	 * up, which means, we should be past the tcp_discardcb() calls.
	 * Sleep to let all tcpcb timers really disappear and cleanup.
	 */
	for (;;) {
		INP_INFO_WLOCK(&V_tcbinfo);
		n = V_tcbinfo.ipi_count;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		if (n == 0)
			break;
		pause("tcpdes", hz / 10);
	}
	tcp_hc_destroy();
	syncache_destroy();
	tcp_tw_destroy();
	in_pcbinfo_destroy(&V_tcbinfo);
	/* tcp_discardcb() clears the sack_holes up. */
	uma_zdestroy(V_sack_hole_zone);
	uma_zdestroy(V_tcpcb_zone);

	/*
	 * Cannot free the zone until all tcpcbs are released as we attach
	 * the allocations to them.
	 */
	tcp_fastopen_destroy();

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);

#ifdef TCP_HHOOK
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
	}
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
	}
#endif
}
VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
#endif

void
tcp_fini(void *xtp)
{

}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(struct inpcb *inp, uint16_t port, void *ip_ptr, void *tcp_ptr)
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

	INP_WLOCK_ASSERT(inp);

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		if (port == 0)
			ip6->ip6_nxt = IPPROTO_TCP;
		else
			ip6->ip6_nxt = IPPROTO_UDP;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		if (port == 0)
			ip->ip_p = IPPROTO_TCP;
		else
			ip->ip_p = IPPROTO_UDP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
#endif /* INET */
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_off = 5;
	tcp_set_flags(th, 0);
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}
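
/*
 * Usage sketch: the keepalive path builds its headers exactly this way,
 * roughly
 *
 *	t = tcpip_maketemplate(inp);
 *	if (t != NULL) {
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		free(t, M_TEMP);
 *	}
 *
 * (see tcpip_maketemplate() and tcp_respond() below).
 */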

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(struct inpcb *inp)
{
	struct tcptemp *t;

	t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
	if (t == NULL)
		return (NULL);
	tcpip_fillheaders(inp, 0, (void *)&t->tt_ipgen, (void *)&t->tt_t);
	return (t);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at th and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment th,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then th must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
    tcp_seq ack, tcp_seq seq, int flags)
{
	struct tcpopt to;
	struct inpcb *inp;
	struct ip *ip;
	struct mbuf *optm;
	struct udphdr *uh = NULL;
	struct tcphdr *nth;
	struct tcp_log_buffer *lgb;
	u_char *optp;
#ifdef INET6
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int optlen, tlen, win, ulen;
	bool incl_opts;
	uint16_t port;
	int output_ret;
#ifdef INVARIANTS
	int thflags = tcp_get_flags(th);
#endif

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
	NET_EPOCH_ASSERT();

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp != NULL) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_LOCK_ASSERT(inp);
	} else
		inp = NULL;

	if (m != NULL) {
#ifdef INET6
		if (isipv6 && ip6 && (ip6->ip6_nxt == IPPROTO_UDP))
			port = m->m_pkthdr.tcp_tun_port;
		else
#endif
		if (ip && (ip->ip_p == IPPROTO_UDP))
			port = m->m_pkthdr.tcp_tun_port;
		else
			port = 0;
	} else
		port = tp->t_port;

	incl_opts = false;
	win = 0;
	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > TCP_MAXWIN << tp->rcv_scale)
				win = TCP_MAXWIN << tp->rcv_scale;
		}
		if ((tp->t_flags & TF_NOOPT) == 0)
			incl_opts = true;
	}
	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL)
			return;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
			if (port) {
				/* Insert a UDP header */
				uh = (struct udphdr *)nth;
				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
				uh->uh_dport = port;
				nth = (struct tcphdr *)(uh + 1);
			}
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
			if (port) {
				/* Insert a UDP header */
				uh = (struct udphdr *)nth;
				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
				uh->uh_dport = port;
				nth = (struct tcphdr *)(uh + 1);
			}
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
sizeof(struct tcphdr)); 1882 flags = TH_ACK; 1883 } else if ((!M_WRITABLE(m)) || (port != 0)) { 1884 struct mbuf *n; 1885 1886 /* Can't reuse 'm', allocate a new mbuf. */ 1887 n = m_gethdr(M_NOWAIT, MT_DATA); 1888 if (n == NULL) { 1889 m_freem(m); 1890 return; 1891 } 1892 1893 if (!m_dup_pkthdr(n, m, M_NOWAIT)) { 1894 m_freem(m); 1895 m_freem(n); 1896 return; 1897 } 1898 1899 n->m_data += max_linkhdr; 1900 /* m_len is set later */ 1901 #define xchg(a,b,type) { type t; t=a; a=b; b=t; } 1902 #ifdef INET6 1903 if (isipv6) { 1904 bcopy((caddr_t)ip6, mtod(n, caddr_t), 1905 sizeof(struct ip6_hdr)); 1906 ip6 = mtod(n, struct ip6_hdr *); 1907 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 1908 nth = (struct tcphdr *)(ip6 + 1); 1909 if (port) { 1910 /* Insert a UDP header */ 1911 uh = (struct udphdr *)nth; 1912 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1913 uh->uh_dport = port; 1914 nth = (struct tcphdr *)(uh + 1); 1915 } 1916 } else 1917 #endif /* INET6 */ 1918 { 1919 bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip)); 1920 ip = mtod(n, struct ip *); 1921 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t); 1922 nth = (struct tcphdr *)(ip + 1); 1923 if (port) { 1924 /* Insert a UDP header */ 1925 uh = (struct udphdr *)nth; 1926 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1927 uh->uh_dport = port; 1928 nth = (struct tcphdr *)(uh + 1); 1929 } 1930 } 1931 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr)); 1932 xchg(nth->th_dport, nth->th_sport, uint16_t); 1933 th = nth; 1934 m_freem(m); 1935 m = n; 1936 } else { 1937 /* 1938 * reuse the mbuf. 1939 * XXX MRT We inherit the FIB, which is lucky. 1940 */ 1941 m_freem(m->m_next); 1942 m->m_next = NULL; 1943 m->m_data = (caddr_t)ipgen; 1944 /* m_len is set later */ 1945 #ifdef INET6 1946 if (isipv6) { 1947 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 1948 nth = (struct tcphdr *)(ip6 + 1); 1949 } else 1950 #endif /* INET6 */ 1951 { 1952 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t); 1953 nth = (struct tcphdr *)(ip + 1); 1954 } 1955 if (th != nth) { 1956 /* 1957 * this is usually a case when an extension header 1958 * exists between the IPv6 header and the 1959 * TCP header. 1960 */ 1961 nth->th_sport = th->th_sport; 1962 nth->th_dport = th->th_dport; 1963 } 1964 xchg(nth->th_dport, nth->th_sport, uint16_t); 1965 #undef xchg 1966 } 1967 tlen = 0; 1968 #ifdef INET6 1969 if (isipv6) 1970 tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr); 1971 #endif 1972 #if defined(INET) && defined(INET6) 1973 else 1974 #endif 1975 #ifdef INET 1976 tlen = sizeof (struct tcpiphdr); 1977 #endif 1978 if (port) 1979 tlen += sizeof (struct udphdr); 1980 #ifdef INVARIANTS 1981 m->m_len = 0; 1982 KASSERT(M_TRAILINGSPACE(m) >= tlen, 1983 ("Not enough trailing space for message (m=%p, need=%d, have=%ld)", 1984 m, tlen, (long)M_TRAILINGSPACE(m))); 1985 #endif 1986 m->m_len = tlen; 1987 to.to_flags = 0; 1988 if (incl_opts) { 1989 /* Make sure we have room. */ 1990 if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) { 1991 m->m_next = m_get(M_NOWAIT, MT_DATA); 1992 if (m->m_next) { 1993 optp = mtod(m->m_next, u_char *); 1994 optm = m->m_next; 1995 } else 1996 incl_opts = false; 1997 } else { 1998 optp = (u_char *) (nth + 1); 1999 optm = m; 2000 } 2001 } 2002 if (incl_opts) { 2003 /* Timestamps. */ 2004 if (tp->t_flags & TF_RCVD_TSTMP) { 2005 to.to_tsval = tcp_ts_getticks() + tp->ts_offset; 2006 to.to_tsecr = tp->ts_recent; 2007 to.to_flags |= TOF_TS; 2008 } 2009 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 2010 /* TCP-MD5 (RFC2385). 
*/ 2011 if (tp->t_flags & TF_SIGNATURE) 2012 to.to_flags |= TOF_SIGNATURE; 2013 #endif 2014 /* Add the options. */ 2015 tlen += optlen = tcp_addoptions(&to, optp); 2016 2017 /* Update m_len in the correct mbuf. */ 2018 optm->m_len += optlen; 2019 } else 2020 optlen = 0; 2021 #ifdef INET6 2022 if (isipv6) { 2023 if (uh) { 2024 ulen = tlen - sizeof(struct ip6_hdr); 2025 uh->uh_ulen = htons(ulen); 2026 } 2027 ip6->ip6_flow = 0; 2028 ip6->ip6_vfc = IPV6_VERSION; 2029 if (port) 2030 ip6->ip6_nxt = IPPROTO_UDP; 2031 else 2032 ip6->ip6_nxt = IPPROTO_TCP; 2033 ip6->ip6_plen = htons(tlen - sizeof(*ip6)); 2034 } 2035 #endif 2036 #if defined(INET) && defined(INET6) 2037 else 2038 #endif 2039 #ifdef INET 2040 { 2041 if (uh) { 2042 ulen = tlen - sizeof(struct ip); 2043 uh->uh_ulen = htons(ulen); 2044 } 2045 ip->ip_len = htons(tlen); 2046 ip->ip_ttl = V_ip_defttl; 2047 if (port) { 2048 ip->ip_p = IPPROTO_UDP; 2049 } else { 2050 ip->ip_p = IPPROTO_TCP; 2051 } 2052 if (V_path_mtu_discovery) 2053 ip->ip_off |= htons(IP_DF); 2054 } 2055 #endif 2056 m->m_pkthdr.len = tlen; 2057 m->m_pkthdr.rcvif = NULL; 2058 #ifdef MAC 2059 if (inp != NULL) { 2060 /* 2061 * Packet is associated with a socket, so allow the 2062 * label of the response to reflect the socket label. 2063 */ 2064 INP_LOCK_ASSERT(inp); 2065 mac_inpcb_create_mbuf(inp, m); 2066 } else { 2067 /* 2068 * Packet is not associated with a socket, so possibly 2069 * update the label in place. 2070 */ 2071 mac_netinet_tcp_reply(m); 2072 } 2073 #endif 2074 nth->th_seq = htonl(seq); 2075 nth->th_ack = htonl(ack); 2076 nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2; 2077 tcp_set_flags(nth, flags); 2078 if (tp != NULL) 2079 nth->th_win = htons((u_short) (win >> tp->rcv_scale)); 2080 else 2081 nth->th_win = htons((u_short)win); 2082 nth->th_urp = 0; 2083 2084 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 2085 if (to.to_flags & TOF_SIGNATURE) { 2086 if (!TCPMD5_ENABLED() || 2087 TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) { 2088 m_freem(m); 2089 return; 2090 } 2091 } 2092 #endif 2093 2094 #ifdef INET6 2095 if (isipv6) { 2096 if (port) { 2097 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 2098 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 2099 uh->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 2100 nth->th_sum = 0; 2101 } else { 2102 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 2103 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 2104 nth->th_sum = in6_cksum_pseudo(ip6, 2105 tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0); 2106 } 2107 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? 
tp->t_inpcb : 2108 NULL, NULL); 2109 } 2110 #endif /* INET6 */ 2111 #if defined(INET6) && defined(INET) 2112 else 2113 #endif 2114 #ifdef INET 2115 { 2116 if (port) { 2117 uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 2118 htons(ulen + IPPROTO_UDP)); 2119 m->m_pkthdr.csum_flags = CSUM_UDP; 2120 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 2121 nth->th_sum = 0; 2122 } else { 2123 m->m_pkthdr.csum_flags = CSUM_TCP; 2124 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 2125 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 2126 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); 2127 } 2128 } 2129 #endif /* INET */ 2130 #ifdef TCPDEBUG 2131 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG)) 2132 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0); 2133 #endif 2134 TCP_PROBE3(debug__output, tp, th, m); 2135 if (flags & TH_RST) 2136 TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth); 2137 lgb = NULL; 2138 if ((tp != NULL) && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2139 if (INP_WLOCKED(inp)) { 2140 union tcp_log_stackspecific log; 2141 struct timeval tv; 2142 2143 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2144 log.u_bbr.inhpts = tp->t_inpcb->inp_in_hpts; 2145 log.u_bbr.flex8 = 4; 2146 log.u_bbr.pkts_out = tp->t_maxseg; 2147 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2148 log.u_bbr.delivered = 0; 2149 lgb = tcp_log_event_(tp, nth, NULL, NULL, TCP_LOG_OUT, 2150 ERRNO_UNK, 0, &log, false, NULL, NULL, 0, &tv); 2151 } else { 2152 /* 2153 * We can not log the packet, since we only own the 2154 * read lock, but a write lock is needed. The read lock 2155 * is not upgraded to a write lock, since only getting 2156 * the read lock was done intentionally to improve the 2157 * handling of SYN flooding attacks. 2158 * This happens only for pure SYN segments received in 2159 * the initial CLOSED state, or received in a more 2160 * advanced state than listen and the UDP encapsulation 2161 * port is unexpected. 2162 * The incoming SYN segments do not really belong to 2163 * the TCP connection and the handling does not change 2164 * the state of the TCP connection. Therefore, the 2165 * sending of the RST segments is not logged. Please 2166 * note that also the incoming SYN segments are not 2167 * logged. 2168 * 2169 * The following code ensures that the above description 2170 * is and stays correct. 2171 */ 2172 KASSERT((thflags & (TH_ACK|TH_SYN)) == TH_SYN && 2173 (tp->t_state == TCPS_CLOSED || 2174 (tp->t_state > TCPS_LISTEN && tp->t_port != port)), 2175 ("%s: Logging of TCP segment with flags 0x%b and " 2176 "UDP encapsulation port %u skipped in state %s", 2177 __func__, thflags, PRINT_TH_FLAGS, 2178 ntohs(port), tcpstates[tp->t_state])); 2179 } 2180 } 2181 2182 #ifdef INET6 2183 if (isipv6) { 2184 TCP_PROBE5(send, NULL, tp, ip6, tp, nth); 2185 output_ret = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp); 2186 } 2187 #endif /* INET6 */ 2188 #if defined(INET) && defined(INET6) 2189 else 2190 #endif 2191 #ifdef INET 2192 { 2193 TCP_PROBE5(send, NULL, tp, ip, tp, nth); 2194 output_ret = ip_output(m, NULL, NULL, 0, NULL, inp); 2195 } 2196 #endif 2197 if (lgb != NULL) 2198 lgb->tlb_errno = output_ret; 2199 } 2200 2201 /* 2202 * Create a new TCP control block, making an 2203 * empty reassembly queue and hooking it to the argument 2204 * protocol control block. The `inp' parameter must have 2205 * come from the zone allocator set up in tcp_init(). 
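 *
 * A minimal sketch of the expected attach-time usage (hypothetical;
 * compare the attach path in tcp_usrreq.c):
 *
 *	tp = tcp_newtcpcb(inp);
 *	if (tp == NULL)
 *		return (ENOBUFS);
 *	tp->t_state = TCPS_CLOSED;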
2206 */ 2207 struct tcpcb * 2208 tcp_newtcpcb(struct inpcb *inp) 2209 { 2210 struct tcpcb_mem *tm; 2211 struct tcpcb *tp; 2212 #ifdef INET6 2213 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 2214 #endif /* INET6 */ 2215 2216 tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO); 2217 if (tm == NULL) 2218 return (NULL); 2219 tp = &tm->tcb; 2220 2221 /* Initialise cc_var struct for this tcpcb. */ 2222 tp->ccv = &tm->ccv; 2223 tp->ccv->type = IPPROTO_TCP; 2224 tp->ccv->ccvc.tcp = tp; 2225 rw_rlock(&tcp_function_lock); 2226 tp->t_fb = tcp_func_set_ptr; 2227 refcount_acquire(&tp->t_fb->tfb_refcnt); 2228 rw_runlock(&tcp_function_lock); 2229 /* 2230 * Use the current system default CC algorithm. 2231 */ 2232 cc_attach(tp, CC_DEFAULT_ALGO()); 2233 2234 /* 2235 * The tcpcb will hold a reference on its inpcb until tcp_discardcb() 2236 * is called. 2237 */ 2238 in_pcbref(inp); /* Reference for tcpcb */ 2239 tp->t_inpcb = inp; 2240 2241 if (CC_ALGO(tp)->cb_init != NULL) 2242 if (CC_ALGO(tp)->cb_init(tp->ccv, NULL) > 0) { 2243 cc_detach(tp); 2244 if (tp->t_fb->tfb_tcp_fb_fini) 2245 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2246 in_pcbrele_wlocked(inp); 2247 refcount_release(&tp->t_fb->tfb_refcnt); 2248 uma_zfree(V_tcpcb_zone, tm); 2249 return (NULL); 2250 } 2251 2252 #ifdef TCP_HHOOK 2253 tp->osd = &tm->osd; 2254 if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) { 2255 if (tp->t_fb->tfb_tcp_fb_fini) 2256 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2257 in_pcbrele_wlocked(inp); 2258 refcount_release(&tp->t_fb->tfb_refcnt); 2259 uma_zfree(V_tcpcb_zone, tm); 2260 return (NULL); 2261 } 2262 #endif 2263 2264 #ifdef VIMAGE 2265 tp->t_vnet = inp->inp_vnet; 2266 #endif 2267 tp->t_timers = &tm->tt; 2268 TAILQ_INIT(&tp->t_segq); 2269 tp->t_maxseg = 2270 #ifdef INET6 2271 isipv6 ? V_tcp_v6mssdflt : 2272 #endif /* INET6 */ 2273 V_tcp_mssdflt; 2274 2275 /* Set up our timeouts. */ 2276 callout_init(&tp->t_timers->tt_rexmt, 1); 2277 callout_init(&tp->t_timers->tt_persist, 1); 2278 callout_init(&tp->t_timers->tt_keep, 1); 2279 callout_init(&tp->t_timers->tt_2msl, 1); 2280 callout_init(&tp->t_timers->tt_delack, 1); 2281 2282 if (V_tcp_do_rfc1323) 2283 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP); 2284 if (V_tcp_do_sack) 2285 tp->t_flags |= TF_SACK_PERMIT; 2286 TAILQ_INIT(&tp->snd_holes); 2287 2288 /* 2289 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no 2290 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives 2291 * reasonable initial retransmit time. 2292 */ 2293 tp->t_srtt = TCPTV_SRTTBASE; 2294 tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4; 2295 tp->t_rttmin = tcp_rexmit_min; 2296 tp->t_rxtcur = tcp_rexmit_initial; 2297 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 2298 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; 2299 tp->t_rcvtime = ticks; 2300 /* 2301 * IPv4 TTL initialization is necessary for an IPv6 socket as well, 2302 * because the socket may be bound to an IPv6 wildcard address, 2303 * which may match an IPv4-mapped IPv6 address. 2304 */ 2305 inp->inp_ip_ttl = V_ip_defttl; 2306 inp->inp_ppcb = tp; 2307 #ifdef TCPPCAP 2308 /* 2309 * Init the TCP PCAP queues. 2310 */ 2311 tcp_pcap_tcpcb_init(tp); 2312 #endif 2313 #ifdef TCP_BLACKBOX 2314 /* Initialize the per-TCPCB log data. 
*/ 2315 tcp_log_tcpcbinit(tp); 2316 #endif 2317 tp->t_pacing_rate = -1; 2318 if (tp->t_fb->tfb_tcp_fb_init) { 2319 if ((*tp->t_fb->tfb_tcp_fb_init)(tp)) { 2320 refcount_release(&tp->t_fb->tfb_refcnt); 2321 in_pcbrele_wlocked(inp); 2322 uma_zfree(V_tcpcb_zone, tm); 2323 return (NULL); 2324 } 2325 } 2326 #ifdef STATS 2327 if (V_tcp_perconn_stats_enable == 1) 2328 tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0); 2329 #endif 2330 if (V_tcp_do_lrd) 2331 tp->t_flags |= TF_LRD; 2332 return (tp); /* XXX */ 2333 } 2334 2335 /* 2336 * Drop a TCP connection, reporting 2337 * the specified error. If connection is synchronized, 2338 * then send a RST to peer. 2339 */ 2340 struct tcpcb * 2341 tcp_drop(struct tcpcb *tp, int errno) 2342 { 2343 struct socket *so = tp->t_inpcb->inp_socket; 2344 2345 NET_EPOCH_ASSERT(); 2346 INP_WLOCK_ASSERT(tp->t_inpcb); 2347 2348 if (TCPS_HAVERCVDSYN(tp->t_state)) { 2349 tcp_state_change(tp, TCPS_CLOSED); 2350 /* Don't use tcp_output() here due to possible recursion. */ 2351 (void)tcp_output_nodrop(tp); 2352 TCPSTAT_INC(tcps_drops); 2353 } else 2354 TCPSTAT_INC(tcps_conndrops); 2355 if (errno == ETIMEDOUT && tp->t_softerror) 2356 errno = tp->t_softerror; 2357 so->so_error = errno; 2358 return (tcp_close(tp)); 2359 } 2360 2361 void 2362 tcp_discardcb(struct tcpcb *tp) 2363 { 2364 struct inpcb *inp = tp->t_inpcb; 2365 2366 INP_WLOCK_ASSERT(inp); 2367 2368 /* 2369 * Make sure that all of our timers are stopped before we delete the 2370 * PCB. 2371 * 2372 * If stopping a timer fails, we schedule a discard function in same 2373 * callout, and the last discard function called will take care of 2374 * deleting the tcpcb. 2375 */ 2376 tp->t_timers->tt_draincnt = 0; 2377 tcp_timer_stop(tp, TT_REXMT); 2378 tcp_timer_stop(tp, TT_PERSIST); 2379 tcp_timer_stop(tp, TT_KEEP); 2380 tcp_timer_stop(tp, TT_2MSL); 2381 tcp_timer_stop(tp, TT_DELACK); 2382 if (tp->t_fb->tfb_tcp_timer_stop_all) { 2383 /* 2384 * Call the stop-all function of the methods, 2385 * this function should call the tcp_timer_stop() 2386 * method with each of the function specific timeouts. 2387 * That stop will be called via the tfb_tcp_timer_stop() 2388 * which should use the async drain function of the 2389 * callout system (see tcp_var.h). 2390 */ 2391 tp->t_fb->tfb_tcp_timer_stop_all(tp); 2392 } 2393 2394 /* free the reassembly queue, if any */ 2395 tcp_reass_flush(tp); 2396 2397 #ifdef TCP_OFFLOAD 2398 /* Disconnect offload device, if any. */ 2399 if (tp->t_flags & TF_TOE) 2400 tcp_offload_detach(tp); 2401 #endif 2402 2403 tcp_free_sackholes(tp); 2404 2405 #ifdef TCPPCAP 2406 /* Free the TCP PCAP queues. */ 2407 tcp_pcap_drain(&(tp->t_inpkts)); 2408 tcp_pcap_drain(&(tp->t_outpkts)); 2409 #endif 2410 2411 /* Allow the CC algorithm to clean up after itself. 
*/ 2412 if (CC_ALGO(tp)->cb_destroy != NULL) 2413 CC_ALGO(tp)->cb_destroy(tp->ccv); 2414 CC_DATA(tp) = NULL; 2415 /* Detach from the CC algorithm */ 2416 cc_detach(tp); 2417 2418 #ifdef TCP_HHOOK 2419 khelp_destroy_osd(tp->osd); 2420 #endif 2421 #ifdef STATS 2422 stats_blob_destroy(tp->t_stats); 2423 #endif 2424 2425 CC_ALGO(tp) = NULL; 2426 inp->inp_ppcb = NULL; 2427 if (tp->t_timers->tt_draincnt == 0) { 2428 bool released __diagused; 2429 2430 released = tcp_freecb(tp); 2431 KASSERT(!released, ("%s: inp %p should not have been released " 2432 "here", __func__, inp)); 2433 } 2434 } 2435 2436 bool 2437 tcp_freecb(struct tcpcb *tp) 2438 { 2439 struct inpcb *inp = tp->t_inpcb; 2440 struct socket *so = inp->inp_socket; 2441 #ifdef INET6 2442 bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 2443 #endif 2444 2445 INP_WLOCK_ASSERT(inp); 2446 MPASS(tp->t_timers->tt_draincnt == 0); 2447 2448 /* We own the last reference on tcpcb, let's free it. */ 2449 #ifdef TCP_BLACKBOX 2450 tcp_log_tcpcbfini(tp); 2451 #endif 2452 TCPSTATES_DEC(tp->t_state); 2453 if (tp->t_fb->tfb_tcp_fb_fini) 2454 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2455 2456 /* 2457 * If we got enough samples through the srtt filter, 2458 * save the rtt and rttvar in the routing entry. 2459 * 'Enough' is arbitrarily defined as 4 rtt samples. 2460 * 4 samples is enough for the srtt filter to converge 2461 * to within enough % of the correct value; fewer samples 2462 * and we could save a bogus rtt. The danger is not high 2463 * as tcp quickly recovers from everything. 2464 * XXX: Works very well but needs some more statistics! 2465 * 2466 * XXXRRS: Updating must be after the stack fini() since 2467 * that may be converting some internal representation of 2468 * say srtt etc into the general one used by other stacks. 2469 * Lets also at least protect against the so being NULL 2470 * as RW stated below. 2471 */ 2472 if ((tp->t_rttupdated >= 4) && (so != NULL)) { 2473 struct hc_metrics_lite metrics; 2474 uint32_t ssthresh; 2475 2476 bzero(&metrics, sizeof(metrics)); 2477 /* 2478 * Update the ssthresh always when the conditions below 2479 * are satisfied. This gives us better new start value 2480 * for the congestion avoidance for new connections. 2481 * ssthresh is only set if packet loss occurred on a session. 2482 * 2483 * XXXRW: 'so' may be NULL here, and/or socket buffer may be 2484 * being torn down. Ideally this code would not use 'so'. 2485 */ 2486 ssthresh = tp->snd_ssthresh; 2487 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) { 2488 /* 2489 * convert the limit from user data bytes to 2490 * packets then to packet data bytes. 2491 */ 2492 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg; 2493 if (ssthresh < 2) 2494 ssthresh = 2; 2495 ssthresh *= (tp->t_maxseg + 2496 #ifdef INET6 2497 (isipv6 ? sizeof (struct ip6_hdr) + 2498 sizeof (struct tcphdr) : 2499 #endif 2500 sizeof (struct tcpiphdr) 2501 #ifdef INET6 2502 ) 2503 #endif 2504 ); 2505 } else 2506 ssthresh = 0; 2507 metrics.rmx_ssthresh = ssthresh; 2508 2509 metrics.rmx_rtt = tp->t_srtt; 2510 metrics.rmx_rttvar = tp->t_rttvar; 2511 metrics.rmx_cwnd = tp->snd_cwnd; 2512 metrics.rmx_sendpipe = 0; 2513 metrics.rmx_recvpipe = 0; 2514 2515 tcp_hc_update(&inp->inp_inc, &metrics); 2516 } 2517 2518 refcount_release(&tp->t_fb->tfb_refcnt); 2519 uma_zfree(V_tcpcb_zone, tp); 2520 2521 return (in_pcbrele_wlocked(inp)); 2522 } 2523 2524 /* 2525 * Attempt to close a TCP control block, marking it as dropped, and freeing 2526 * the socket if we hold the only reference. 
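 *
 * Callers must be prepared for the tcpcb, and with it the inpcb lock,
 * to go away, e.g. (sketch):
 *
 *	tp = tcp_close(tp);
 *	if (tp != NULL)
 *		INP_WUNLOCK(inp);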
2527  */
2528 struct tcpcb *
2529 tcp_close(struct tcpcb *tp)
2530 {
2531 	struct inpcb *inp = tp->t_inpcb;
2532 	struct socket *so;
2533 
2534 	INP_WLOCK_ASSERT(inp);
2535 
2536 #ifdef TCP_OFFLOAD
2537 	if (tp->t_state == TCPS_LISTEN)
2538 		tcp_offload_listen_stop(tp);
2539 #endif
2540 	/*
2541 	 * This releases the TFO pending counter resource for TFO listen
2542 	 * sockets as well as passively-created TFO sockets that transition
2543 	 * from SYN_RECEIVED to CLOSED.
2544 	 */
2545 	if (tp->t_tfo_pending) {
2546 		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2547 		tp->t_tfo_pending = NULL;
2548 	}
2549 #ifdef TCPHPTS
2550 	tcp_hpts_remove(inp);
2551 #endif
2552 	in_pcbdrop(inp);
2553 	TCPSTAT_INC(tcps_closed);
2554 	if (tp->t_state != TCPS_CLOSED)
2555 		tcp_state_change(tp, TCPS_CLOSED);
2556 	KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
2557 	so = inp->inp_socket;
2558 	soisdisconnected(so);
2559 	if (inp->inp_flags & INP_SOCKREF) {
2560 		inp->inp_flags &= ~INP_SOCKREF;
2561 		INP_WUNLOCK(inp);
2562 		sorele(so);
2563 		return (NULL);
2564 	}
2565 	return (tp);
2566 }
2567 
2568 /*
2569  * Notify a tcp user of an asynchronous error;
2570  * store the error as a soft error on the connection.
2571  *
2572  * Do not wake up the user, since there currently is no mechanism for
2573  * reporting soft errors (yet - a kqueue filter may be added); the
2574  * saved error is instead reported if the connection later times out.
2575  */
2576 static struct inpcb *
2577 tcp_notify(struct inpcb *inp, int error)
2578 {
2579 	struct tcpcb *tp;
2580 
2581 	INP_WLOCK_ASSERT(inp);
2582 
2583 	if ((inp->inp_flags & INP_TIMEWAIT) ||
2584 	    (inp->inp_flags & INP_DROPPED))
2585 		return (inp);
2586 
2587 	tp = intotcpcb(inp);
2588 	KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
2589 
2590 	/*
2591 	 * Ignore some errors if we are hooked up.
2592 	 * If the connection hasn't completed, has retransmitted several
2593 	 * times, and receives a second error, give up now.  This is
2594 	 * better than waiting a long time to establish a connection that
2595 	 * can never complete.
2596 */ 2597 if (tp->t_state == TCPS_ESTABLISHED && 2598 (error == EHOSTUNREACH || error == ENETUNREACH || 2599 error == EHOSTDOWN)) { 2600 if (inp->inp_route.ro_nh) { 2601 NH_FREE(inp->inp_route.ro_nh); 2602 inp->inp_route.ro_nh = (struct nhop_object *)NULL; 2603 } 2604 return (inp); 2605 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 && 2606 tp->t_softerror) { 2607 tp = tcp_drop(tp, error); 2608 if (tp != NULL) 2609 return (inp); 2610 else 2611 return (NULL); 2612 } else { 2613 tp->t_softerror = error; 2614 return (inp); 2615 } 2616 #if 0 2617 wakeup( &so->so_timeo); 2618 sorwakeup(so); 2619 sowwakeup(so); 2620 #endif 2621 } 2622 2623 static int 2624 tcp_pcblist(SYSCTL_HANDLER_ARGS) 2625 { 2626 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo, 2627 INPLOOKUP_RLOCKPCB); 2628 struct xinpgen xig; 2629 struct inpcb *inp; 2630 int error; 2631 2632 if (req->newptr != NULL) 2633 return (EPERM); 2634 2635 if (req->oldptr == NULL) { 2636 int n; 2637 2638 n = V_tcbinfo.ipi_count + 2639 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2640 n += imax(n / 8, 10); 2641 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb); 2642 return (0); 2643 } 2644 2645 if ((error = sysctl_wire_old_buffer(req, 0)) != 0) 2646 return (error); 2647 2648 bzero(&xig, sizeof(xig)); 2649 xig.xig_len = sizeof xig; 2650 xig.xig_count = V_tcbinfo.ipi_count + 2651 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2652 xig.xig_gen = V_tcbinfo.ipi_gencnt; 2653 xig.xig_sogen = so_gencnt; 2654 error = SYSCTL_OUT(req, &xig, sizeof xig); 2655 if (error) 2656 return (error); 2657 2658 error = syncache_pcblist(req); 2659 if (error) 2660 return (error); 2661 2662 while ((inp = inp_next(&inpi)) != NULL) { 2663 if (inp->inp_gencnt <= xig.xig_gen) { 2664 int crerr; 2665 2666 /* 2667 * XXX: This use of cr_cansee(), introduced with 2668 * TCP state changes, is not quite right, but for 2669 * now, better than nothing. 2670 */ 2671 if (inp->inp_flags & INP_TIMEWAIT) { 2672 if (intotw(inp) != NULL) 2673 crerr = cr_cansee(req->td->td_ucred, 2674 intotw(inp)->tw_cred); 2675 else 2676 crerr = EINVAL; /* Skip this inp. */ 2677 } else 2678 crerr = cr_canseeinpcb(req->td->td_ucred, inp); 2679 if (crerr == 0) { 2680 struct xtcpcb xt; 2681 2682 tcp_inptoxtp(inp, &xt); 2683 error = SYSCTL_OUT(req, &xt, sizeof xt); 2684 if (error) { 2685 INP_RUNLOCK(inp); 2686 break; 2687 } else 2688 continue; 2689 } 2690 } 2691 } 2692 2693 if (!error) { 2694 /* 2695 * Give the user an updated idea of our state. 2696 * If the generation differs from what we told 2697 * her before, she knows that something happened 2698 * while we were processing this request, and it 2699 * might be necessary to retry. 
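 *
 * A hypothetical userland consumer (sketch; netstat(1) does the real
 * thing) sizes the buffer, fetches, then validates against xig_gen:
 *
 *	size_t len = 0;
 *
 *	sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	if (buf != NULL && sysctlbyname("net.inet.tcp.pcblist",
 *	    buf, &len, NULL, 0) == 0) {
 *		(walk the leading xinpgen, the xtcpcb records and the
 *		trailing xinpgen; retry if the two xig_gen values differ)
 *	}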
2700 */ 2701 xig.xig_gen = V_tcbinfo.ipi_gencnt; 2702 xig.xig_sogen = so_gencnt; 2703 xig.xig_count = V_tcbinfo.ipi_count + 2704 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2705 error = SYSCTL_OUT(req, &xig, sizeof xig); 2706 } 2707 2708 return (error); 2709 } 2710 2711 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, 2712 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2713 NULL, 0, tcp_pcblist, "S,xtcpcb", 2714 "List of active TCP connections"); 2715 2716 #ifdef INET 2717 static int 2718 tcp_getcred(SYSCTL_HANDLER_ARGS) 2719 { 2720 struct xucred xuc; 2721 struct sockaddr_in addrs[2]; 2722 struct epoch_tracker et; 2723 struct inpcb *inp; 2724 int error; 2725 2726 error = priv_check(req->td, PRIV_NETINET_GETCRED); 2727 if (error) 2728 return (error); 2729 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 2730 if (error) 2731 return (error); 2732 NET_EPOCH_ENTER(et); 2733 inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port, 2734 addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL); 2735 NET_EPOCH_EXIT(et); 2736 if (inp != NULL) { 2737 if (inp->inp_socket == NULL) 2738 error = ENOENT; 2739 if (error == 0) 2740 error = cr_canseeinpcb(req->td->td_ucred, inp); 2741 if (error == 0) 2742 cru2x(inp->inp_cred, &xuc); 2743 INP_RUNLOCK(inp); 2744 } else 2745 error = ENOENT; 2746 if (error == 0) 2747 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 2748 return (error); 2749 } 2750 2751 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, 2752 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT, 2753 0, 0, tcp_getcred, "S,xucred", 2754 "Get the xucred of a TCP connection"); 2755 #endif /* INET */ 2756 2757 #ifdef INET6 2758 static int 2759 tcp6_getcred(SYSCTL_HANDLER_ARGS) 2760 { 2761 struct epoch_tracker et; 2762 struct xucred xuc; 2763 struct sockaddr_in6 addrs[2]; 2764 struct inpcb *inp; 2765 int error; 2766 #ifdef INET 2767 int mapped = 0; 2768 #endif 2769 2770 error = priv_check(req->td, PRIV_NETINET_GETCRED); 2771 if (error) 2772 return (error); 2773 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 2774 if (error) 2775 return (error); 2776 if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 || 2777 (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) { 2778 return (error); 2779 } 2780 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) { 2781 #ifdef INET 2782 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr)) 2783 mapped = 1; 2784 else 2785 #endif 2786 return (EINVAL); 2787 } 2788 2789 NET_EPOCH_ENTER(et); 2790 #ifdef INET 2791 if (mapped == 1) 2792 inp = in_pcblookup(&V_tcbinfo, 2793 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12], 2794 addrs[1].sin6_port, 2795 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12], 2796 addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL); 2797 else 2798 #endif 2799 inp = in6_pcblookup(&V_tcbinfo, 2800 &addrs[1].sin6_addr, addrs[1].sin6_port, 2801 &addrs[0].sin6_addr, addrs[0].sin6_port, 2802 INPLOOKUP_RLOCKPCB, NULL); 2803 NET_EPOCH_EXIT(et); 2804 if (inp != NULL) { 2805 if (inp->inp_socket == NULL) 2806 error = ENOENT; 2807 if (error == 0) 2808 error = cr_canseeinpcb(req->td->td_ucred, inp); 2809 if (error == 0) 2810 cru2x(inp->inp_cred, &xuc); 2811 INP_RUNLOCK(inp); 2812 } else 2813 error = ENOENT; 2814 if (error == 0) 2815 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 2816 return (error); 2817 } 2818 2819 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, 2820 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT, 2821 0, 0, tcp6_getcred, "S,xucred", 2822 "Get the xucred of a TCP6 connection"); 
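
/*
 * A hypothetical userland sketch for either getcred handler: the two
 * endpoint addresses go in as new data, a struct xucred comes back:
 *
 *	struct sockaddr_in6 addrs[2];	(addrs[0] foreign, addrs[1] local)
 *	struct xucred xuc;
 *	size_t len = sizeof(xuc);
 *
 *	if (sysctlbyname("net.inet6.tcp6.getcred", &xuc, &len,
 *	    addrs, sizeof(addrs)) == 0)
 *		printf("owner uid %u\n", (unsigned)xuc.cr_uid);
 */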
2823 #endif /* INET6 */ 2824 2825 #ifdef INET 2826 /* Path MTU to try next when a fragmentation-needed message is received. */ 2827 static inline int 2828 tcp_next_pmtu(const struct icmp *icp, const struct ip *ip) 2829 { 2830 int mtu = ntohs(icp->icmp_nextmtu); 2831 2832 /* If no alternative MTU was proposed, try the next smaller one. */ 2833 if (!mtu) 2834 mtu = ip_next_mtu(ntohs(ip->ip_len), 1); 2835 if (mtu < V_tcp_minmss + sizeof(struct tcpiphdr)) 2836 mtu = V_tcp_minmss + sizeof(struct tcpiphdr); 2837 2838 return (mtu); 2839 } 2840 2841 static void 2842 tcp_ctlinput_with_port(int cmd, struct sockaddr *sa, void *vip, uint16_t port) 2843 { 2844 struct ip *ip = vip; 2845 struct tcphdr *th; 2846 struct in_addr faddr; 2847 struct inpcb *inp; 2848 struct tcpcb *tp; 2849 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify; 2850 struct icmp *icp; 2851 struct in_conninfo inc; 2852 tcp_seq icmp_tcp_seq; 2853 int mtu; 2854 2855 faddr = ((struct sockaddr_in *)sa)->sin_addr; 2856 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) 2857 return; 2858 2859 if (cmd == PRC_MSGSIZE) 2860 notify = tcp_mtudisc_notify; 2861 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || 2862 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL || 2863 cmd == PRC_TIMXCEED_INTRANS) && ip) 2864 notify = tcp_drop_syn_sent; 2865 2866 /* 2867 * Hostdead is ugly because it goes linearly through all PCBs. 2868 * XXX: We never get this from ICMP, otherwise it makes an 2869 * excellent DoS attack on machines with many connections. 2870 */ 2871 else if (cmd == PRC_HOSTDEAD) 2872 ip = NULL; 2873 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) 2874 return; 2875 2876 if (ip == NULL) { 2877 in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify); 2878 return; 2879 } 2880 2881 icp = (struct icmp *)((caddr_t)ip - offsetof(struct icmp, icmp_ip)); 2882 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2883 inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport, ip->ip_src, 2884 th->th_sport, INPLOOKUP_WLOCKPCB, NULL); 2885 if (inp != NULL && PRC_IS_REDIRECT(cmd)) { 2886 /* signal EHOSTDOWN, as it flushes the cached route */ 2887 inp = (*notify)(inp, EHOSTDOWN); 2888 goto out; 2889 } 2890 icmp_tcp_seq = th->th_seq; 2891 if (inp != NULL) { 2892 if (!(inp->inp_flags & INP_TIMEWAIT) && 2893 !(inp->inp_flags & INP_DROPPED) && 2894 !(inp->inp_socket == NULL)) { 2895 tp = intotcpcb(inp); 2896 #ifdef TCP_OFFLOAD 2897 if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) { 2898 /* 2899 * MTU discovery for offloaded connections. Let 2900 * the TOE driver verify seq# and process it. 2901 */ 2902 mtu = tcp_next_pmtu(icp, ip); 2903 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu); 2904 goto out; 2905 } 2906 #endif 2907 if (tp->t_port != port) { 2908 goto out; 2909 } 2910 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) && 2911 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) { 2912 if (cmd == PRC_MSGSIZE) { 2913 /* 2914 * MTU discovery: we got a needfrag and 2915 * will potentially try a lower MTU. 2916 */ 2917 mtu = tcp_next_pmtu(icp, ip); 2918 2919 /* 2920 * Only process the offered MTU if it 2921 * is smaller than the current one. 
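 * For example, with a t_maxseg of 1460 on a
 * 1500-byte path, an offered MTU of 1280 is
 * accepted and cached, while a bogus offer
 * of 9000 is ignored.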
2922 					 */
2923 				if (mtu < tp->t_maxseg +
2924 				    sizeof(struct tcpiphdr)) {
2925 					bzero(&inc, sizeof(inc));
2926 					inc.inc_faddr = faddr;
2927 					inc.inc_fibnum =
2928 					    inp->inp_inc.inc_fibnum;
2929 					tcp_hc_updatemtu(&inc, mtu);
2930 					inp = tcp_mtudisc(inp, mtu);
2931 				}
2932 			} else
2933 				inp = (*notify)(inp,
2934 				    inetctlerrmap[cmd]);
2935 		}
2936 	}
2937 	} else {
2938 		bzero(&inc, sizeof(inc));
2939 		inc.inc_fport = th->th_dport;
2940 		inc.inc_lport = th->th_sport;
2941 		inc.inc_faddr = faddr;
2942 		inc.inc_laddr = ip->ip_src;
2943 		syncache_unreach(&inc, icmp_tcp_seq, port);
2944 	}
2945 out:
2946 	if (inp != NULL)
2947 		INP_WUNLOCK(inp);
2948 }
2949 
2950 void
2951 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
2952 {
2953 	tcp_ctlinput_with_port(cmd, sa, vip, htons(0));
2954 }
2955 
2956 void
2957 tcp_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *vip, void *unused)
2958 {
2959 	/* This is an ICMP message for a TCP segment tunneled over UDP. */
2960 	struct ip *outer_ip, *inner_ip;
2961 	struct icmp *icmp;
2962 	struct udphdr *udp;
2963 	struct tcphdr *th, ttemp;
2964 	int i_hlen, o_len;
2965 	uint16_t port;
2966 
2967 	inner_ip = (struct ip *)vip;
2968 	icmp = (struct icmp *)((caddr_t)inner_ip -
2969 	    (sizeof(struct icmp) - sizeof(struct ip)));
2970 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
2971 	i_hlen = inner_ip->ip_hl << 2;
2972 	o_len = ntohs(outer_ip->ip_len);
2973 	if (o_len <
2974 	    (sizeof(struct ip) + 8 + i_hlen + sizeof(struct udphdr) + offsetof(struct tcphdr, th_ack))) {
2975 		/* Not enough data present */
2976 		return;
2977 	}
2978 	/* Strip out the inner UDP header by copying the TCP header on top of it. */
2979 	udp = (struct udphdr *)(((caddr_t)inner_ip) + i_hlen);
2980 	if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
2981 		return;
2982 	}
2983 	port = udp->uh_dport;
2984 	th = (struct tcphdr *)(udp + 1);
2985 	memcpy(&ttemp, th, sizeof(struct tcphdr));
2986 	memcpy(udp, &ttemp, sizeof(struct tcphdr));
2987 	/* Now adjust down the length recorded in the outer IP header. */
2988 	o_len -= sizeof(struct udphdr);
2989 	outer_ip->ip_len = htons(o_len);
2990 	/* Now call in to the normal handling code. */
2991 	tcp_ctlinput_with_port(cmd, sa, vip, port);
2992 }
2993 #endif /* INET */
2994 
2995 #ifdef INET6
2996 static inline int
2997 tcp6_next_pmtu(const struct icmp6_hdr *icmp6)
2998 {
2999 	int mtu = ntohl(icmp6->icmp6_mtu);
3000 
3001 	/*
3002 	 * If no alternative MTU was proposed, or the proposed MTU was too
3003 	 * small, set to the min.
3004 	 */
3005 	if (mtu < IPV6_MMTU)
3006 		mtu = IPV6_MMTU - 8;	/* XXXNP: what is the adjustment for? */
3007 	return (mtu);
3008 }
3009 
3010 static void
3011 tcp6_ctlinput_with_port(int cmd, struct sockaddr *sa, void *d, uint16_t port)
3012 {
3013 	struct in6_addr *dst;
3014 	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
3015 	struct ip6_hdr *ip6;
3016 	struct mbuf *m;
3017 	struct inpcb *inp;
3018 	struct tcpcb *tp;
3019 	struct icmp6_hdr *icmp6;
3020 	struct ip6ctlparam *ip6cp = NULL;
3021 	const struct sockaddr_in6 *sa6_src = NULL;
3022 	struct in_conninfo inc;
3023 	struct tcp_ports {
3024 		uint16_t th_sport;
3025 		uint16_t th_dport;
3026 	} t_ports;
3027 	tcp_seq icmp_tcp_seq;
3028 	unsigned int mtu;
3029 	unsigned int off;
3030 
3031 	if (sa->sa_family != AF_INET6 ||
3032 	    sa->sa_len != sizeof(struct sockaddr_in6))
3033 		return;
3034 
3035 	/* If the parameter is from icmp6, decode it.
*/ 3036 if (d != NULL) { 3037 ip6cp = (struct ip6ctlparam *)d; 3038 icmp6 = ip6cp->ip6c_icmp6; 3039 m = ip6cp->ip6c_m; 3040 ip6 = ip6cp->ip6c_ip6; 3041 off = ip6cp->ip6c_off; 3042 sa6_src = ip6cp->ip6c_src; 3043 dst = ip6cp->ip6c_finaldst; 3044 } else { 3045 m = NULL; 3046 ip6 = NULL; 3047 off = 0; /* fool gcc */ 3048 sa6_src = &sa6_any; 3049 dst = NULL; 3050 } 3051 3052 if (cmd == PRC_MSGSIZE) 3053 notify = tcp_mtudisc_notify; 3054 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || 3055 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL || 3056 cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL) 3057 notify = tcp_drop_syn_sent; 3058 3059 /* 3060 * Hostdead is ugly because it goes linearly through all PCBs. 3061 * XXX: We never get this from ICMP, otherwise it makes an 3062 * excellent DoS attack on machines with many connections. 3063 */ 3064 else if (cmd == PRC_HOSTDEAD) 3065 ip6 = NULL; 3066 else if ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0) 3067 return; 3068 3069 if (ip6 == NULL) { 3070 in6_pcbnotify(&V_tcbinfo, sa, 0, 3071 (const struct sockaddr *)sa6_src, 3072 0, cmd, NULL, notify); 3073 return; 3074 } 3075 3076 /* Check if we can safely get the ports from the tcp hdr */ 3077 if (m == NULL || 3078 (m->m_pkthdr.len < 3079 (int32_t) (off + sizeof(struct tcp_ports)))) { 3080 return; 3081 } 3082 bzero(&t_ports, sizeof(struct tcp_ports)); 3083 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports); 3084 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport, 3085 &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL); 3086 if (inp != NULL && PRC_IS_REDIRECT(cmd)) { 3087 /* signal EHOSTDOWN, as it flushes the cached route */ 3088 inp = (*notify)(inp, EHOSTDOWN); 3089 goto out; 3090 } 3091 off += sizeof(struct tcp_ports); 3092 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) { 3093 goto out; 3094 } 3095 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq); 3096 if (inp != NULL) { 3097 if (!(inp->inp_flags & INP_TIMEWAIT) && 3098 !(inp->inp_flags & INP_DROPPED) && 3099 !(inp->inp_socket == NULL)) { 3100 tp = intotcpcb(inp); 3101 #ifdef TCP_OFFLOAD 3102 if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) { 3103 /* MTU discovery for offloaded connections. */ 3104 mtu = tcp6_next_pmtu(icmp6); 3105 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu); 3106 goto out; 3107 } 3108 #endif 3109 if (tp->t_port != port) { 3110 goto out; 3111 } 3112 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) && 3113 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) { 3114 if (cmd == PRC_MSGSIZE) { 3115 /* 3116 * MTU discovery: 3117 * If we got a needfrag set the MTU 3118 * in the route to the suggested new 3119 * value (if given) and then notify. 3120 */ 3121 mtu = tcp6_next_pmtu(icmp6); 3122 3123 bzero(&inc, sizeof(inc)); 3124 inc.inc_fibnum = M_GETFIB(m); 3125 inc.inc_flags |= INC_ISIPV6; 3126 inc.inc6_faddr = *dst; 3127 if (in6_setscope(&inc.inc6_faddr, 3128 m->m_pkthdr.rcvif, NULL)) 3129 goto out; 3130 /* 3131 * Only process the offered MTU if it 3132 * is smaller than the current one. 
3133 */ 3134 if (mtu < tp->t_maxseg + 3135 sizeof (struct tcphdr) + 3136 sizeof (struct ip6_hdr)) { 3137 tcp_hc_updatemtu(&inc, mtu); 3138 tcp_mtudisc(inp, mtu); 3139 ICMP6STAT_INC(icp6s_pmtuchg); 3140 } 3141 } else 3142 inp = (*notify)(inp, 3143 inet6ctlerrmap[cmd]); 3144 } 3145 } 3146 } else { 3147 bzero(&inc, sizeof(inc)); 3148 inc.inc_fibnum = M_GETFIB(m); 3149 inc.inc_flags |= INC_ISIPV6; 3150 inc.inc_fport = t_ports.th_dport; 3151 inc.inc_lport = t_ports.th_sport; 3152 inc.inc6_faddr = *dst; 3153 inc.inc6_laddr = ip6->ip6_src; 3154 syncache_unreach(&inc, icmp_tcp_seq, port); 3155 } 3156 out: 3157 if (inp != NULL) 3158 INP_WUNLOCK(inp); 3159 } 3160 3161 void 3162 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d) 3163 { 3164 tcp6_ctlinput_with_port(cmd, sa, d, htons(0)); 3165 } 3166 3167 void 3168 tcp6_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *d, void *unused) 3169 { 3170 struct ip6ctlparam *ip6cp; 3171 struct mbuf *m; 3172 struct udphdr *udp; 3173 uint16_t port; 3174 3175 ip6cp = (struct ip6ctlparam *)d; 3176 m = m_pulldown(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(struct udphdr), NULL); 3177 if (m == NULL) { 3178 return; 3179 } 3180 udp = mtod(m, struct udphdr *); 3181 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) { 3182 return; 3183 } 3184 port = udp->uh_dport; 3185 m_adj(m, sizeof(struct udphdr)); 3186 if ((m->m_flags & M_PKTHDR) == 0) { 3187 ip6cp->ip6c_m->m_pkthdr.len -= sizeof(struct udphdr); 3188 } 3189 /* Now call in to the normal handling code */ 3190 tcp6_ctlinput_with_port(cmd, sa, d, port); 3191 } 3192 3193 #endif /* INET6 */ 3194 3195 static uint32_t 3196 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len) 3197 { 3198 SIPHASH_CTX ctx; 3199 uint32_t hash[2]; 3200 3201 KASSERT(len >= SIPHASH_KEY_LENGTH, 3202 ("%s: keylen %u too short ", __func__, len)); 3203 SipHash24_Init(&ctx); 3204 SipHash_SetKey(&ctx, (uint8_t *)key); 3205 SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t)); 3206 SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t)); 3207 switch (inc->inc_flags & INC_ISIPV6) { 3208 #ifdef INET 3209 case 0: 3210 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr)); 3211 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr)); 3212 break; 3213 #endif 3214 #ifdef INET6 3215 case INC_ISIPV6: 3216 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr)); 3217 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr)); 3218 break; 3219 #endif 3220 } 3221 SipHash_Final((uint8_t *)hash, &ctx); 3222 3223 return (hash[0] ^ hash[1]); 3224 } 3225 3226 uint32_t 3227 tcp_new_ts_offset(struct in_conninfo *inc) 3228 { 3229 struct in_conninfo inc_store, *local_inc; 3230 3231 if (!V_tcp_ts_offset_per_conn) { 3232 memcpy(&inc_store, inc, sizeof(struct in_conninfo)); 3233 inc_store.inc_lport = 0; 3234 inc_store.inc_fport = 0; 3235 local_inc = &inc_store; 3236 } else { 3237 local_inc = inc; 3238 } 3239 return (tcp_keyed_hash(local_inc, V_ts_offset_secret, 3240 sizeof(V_ts_offset_secret))); 3241 } 3242 3243 /* 3244 * Following is where TCP initial sequence number generation occurs. 3245 * 3246 * There are two places where we must use initial sequence numbers: 3247 * 1. In SYN-ACK packets. 3248 * 2. In SYN packets. 3249 * 3250 * All ISNs for SYN-ACK packets are generated by the syncache. See 3251 * tcp_syncache.c for details. 3252 * 3253 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling 3254 * depends on this property. In addition, these ISNs should be 3255 * unguessable so as to prevent connection hijacking. 
To satisfy 3256 * the requirements of this situation, the algorithm outlined in 3257 * RFC 1948 is used, with only small modifications. 3258 * 3259 * Implementation details: 3260 * 3261 * Time is based off the system timer, and is corrected so that it 3262 * increases by one megabyte per second. This allows for proper 3263 * recycling on high speed LANs while still leaving over an hour 3264 * before rollover. 3265 * 3266 * As reading the *exact* system time is too expensive to be done 3267 * whenever setting up a TCP connection, we increment the time 3268 * offset in two ways. First, a small random positive increment 3269 * is added to isn_offset for each connection that is set up. 3270 * Second, the function tcp_isn_tick fires once per clock tick 3271 * and increments isn_offset as necessary so that sequence numbers 3272 * are incremented at approximately ISN_BYTES_PER_SECOND. The 3273 * random positive increments serve only to ensure that the same 3274 * exact sequence number is never sent out twice (as could otherwise 3275 * happen when a port is recycled in less than the system tick 3276 * interval.) 3277 * 3278 * net.inet.tcp.isn_reseed_interval controls the number of seconds 3279 * between seeding of isn_secret. This is normally set to zero, 3280 * as reseeding should not be necessary. 3281 * 3282 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset, 3283 * isn_offset_old, and isn_ctx is performed using the ISN lock. In 3284 * general, this means holding an exclusive (write) lock. 3285 */ 3286 3287 #define ISN_BYTES_PER_SECOND 1048576 3288 #define ISN_STATIC_INCREMENT 4096 3289 #define ISN_RANDOM_INCREMENT (4096 - 1) 3290 #define ISN_SECRET_LENGTH SIPHASH_KEY_LENGTH 3291 3292 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]); 3293 VNET_DEFINE_STATIC(int, isn_last); 3294 VNET_DEFINE_STATIC(int, isn_last_reseed); 3295 VNET_DEFINE_STATIC(u_int32_t, isn_offset); 3296 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old); 3297 3298 #define V_isn_secret VNET(isn_secret) 3299 #define V_isn_last VNET(isn_last) 3300 #define V_isn_last_reseed VNET(isn_last_reseed) 3301 #define V_isn_offset VNET(isn_offset) 3302 #define V_isn_offset_old VNET(isn_offset_old) 3303 3304 tcp_seq 3305 tcp_new_isn(struct in_conninfo *inc) 3306 { 3307 tcp_seq new_isn; 3308 u_int32_t projected_offset; 3309 3310 ISN_LOCK(); 3311 /* Seed if this is the first use, reseed if requested. */ 3312 if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) && 3313 (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz) 3314 < (u_int)ticks))) { 3315 arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0); 3316 V_isn_last_reseed = ticks; 3317 } 3318 3319 /* Compute the hash and return the ISN. */ 3320 new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret, 3321 sizeof(V_isn_secret)); 3322 V_isn_offset += ISN_STATIC_INCREMENT + 3323 (arc4random() & ISN_RANDOM_INCREMENT); 3324 if (ticks != V_isn_last) { 3325 projected_offset = V_isn_offset_old + 3326 ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last); 3327 if (SEQ_GT(projected_offset, V_isn_offset)) 3328 V_isn_offset = projected_offset; 3329 V_isn_offset_old = V_isn_offset; 3330 V_isn_last = ticks; 3331 } 3332 new_isn += V_isn_offset; 3333 ISN_UNLOCK(); 3334 return (new_isn); 3335 } 3336 3337 /* 3338 * When a specific ICMP unreachable message is received and the 3339 * connection state is SYN-SENT, drop the connection. This behavior 3340 * is controlled by the icmp_may_rst sysctl. 
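 * The knob is net.inet.icmp.icmp_may_rst; it defaults to on.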
3341 */ 3342 struct inpcb * 3343 tcp_drop_syn_sent(struct inpcb *inp, int errno) 3344 { 3345 struct tcpcb *tp; 3346 3347 NET_EPOCH_ASSERT(); 3348 INP_WLOCK_ASSERT(inp); 3349 3350 if ((inp->inp_flags & INP_TIMEWAIT) || 3351 (inp->inp_flags & INP_DROPPED)) 3352 return (inp); 3353 3354 tp = intotcpcb(inp); 3355 if (tp->t_state != TCPS_SYN_SENT) 3356 return (inp); 3357 3358 if (IS_FASTOPEN(tp->t_flags)) 3359 tcp_fastopen_disable_path(tp); 3360 3361 tp = tcp_drop(tp, errno); 3362 if (tp != NULL) 3363 return (inp); 3364 else 3365 return (NULL); 3366 } 3367 3368 /* 3369 * When `need fragmentation' ICMP is received, update our idea of the MSS 3370 * based on the new value. Also nudge TCP to send something, since we 3371 * know the packet we just sent was dropped. 3372 * This duplicates some code in the tcp_mss() function in tcp_input.c. 3373 */ 3374 static struct inpcb * 3375 tcp_mtudisc_notify(struct inpcb *inp, int error) 3376 { 3377 3378 return (tcp_mtudisc(inp, -1)); 3379 } 3380 3381 static struct inpcb * 3382 tcp_mtudisc(struct inpcb *inp, int mtuoffer) 3383 { 3384 struct tcpcb *tp; 3385 struct socket *so; 3386 3387 INP_WLOCK_ASSERT(inp); 3388 if ((inp->inp_flags & INP_TIMEWAIT) || 3389 (inp->inp_flags & INP_DROPPED)) 3390 return (inp); 3391 3392 tp = intotcpcb(inp); 3393 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL")); 3394 3395 tcp_mss_update(tp, -1, mtuoffer, NULL, NULL); 3396 3397 so = inp->inp_socket; 3398 SOCKBUF_LOCK(&so->so_snd); 3399 /* If the mss is larger than the socket buffer, decrease the mss. */ 3400 if (so->so_snd.sb_hiwat < tp->t_maxseg) 3401 tp->t_maxseg = so->so_snd.sb_hiwat; 3402 SOCKBUF_UNLOCK(&so->so_snd); 3403 3404 TCPSTAT_INC(tcps_mturesent); 3405 tp->t_rtttime = 0; 3406 tp->snd_nxt = tp->snd_una; 3407 tcp_free_sackholes(tp); 3408 tp->snd_recover = tp->snd_max; 3409 if (tp->t_flags & TF_SACK_PERMIT) 3410 EXIT_FASTRECOVERY(tp->t_flags); 3411 if (tp->t_fb->tfb_tcp_mtu_chg != NULL) { 3412 /* 3413 * Conceptually the snd_nxt setting 3414 * and freeing sack holes should 3415 * be done by the default stacks 3416 * own tfb_tcp_mtu_chg(). 3417 */ 3418 tp->t_fb->tfb_tcp_mtu_chg(tp); 3419 } 3420 if (tcp_output(tp) < 0) 3421 return (NULL); 3422 else 3423 return (inp); 3424 } 3425 3426 #ifdef INET 3427 /* 3428 * Look-up the routing entry to the peer of this inpcb. If no route 3429 * is found and it cannot be allocated, then return 0. This routine 3430 * is called by TCP routines that access the rmx structure and by 3431 * tcp_mss_update to get the peer/interface MTU. 3432 */ 3433 uint32_t 3434 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap) 3435 { 3436 struct nhop_object *nh; 3437 struct ifnet *ifp; 3438 uint32_t maxmtu = 0; 3439 3440 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer")); 3441 3442 if (inc->inc_faddr.s_addr != INADDR_ANY) { 3443 nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0); 3444 if (nh == NULL) 3445 return (0); 3446 3447 ifp = nh->nh_ifp; 3448 maxmtu = nh->nh_mtu; 3449 3450 /* Report additional interface capabilities. 
 */
3451 		if (cap != NULL) {
3452 			if (ifp->if_capenable & IFCAP_TSO4 &&
3453 			    ifp->if_hwassist & CSUM_TSO) {
3454 				cap->ifcap |= CSUM_TSO;
3455 				cap->tsomax = ifp->if_hw_tsomax;
3456 				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3457 				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3458 			}
3459 		}
3460 	}
3461 	return (maxmtu);
3462 }
3463 #endif /* INET */
3464 
3465 #ifdef INET6
3466 uint32_t
3467 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
3468 {
3469 	struct nhop_object *nh;
3470 	struct in6_addr dst6;
3471 	uint32_t scopeid;
3472 	struct ifnet *ifp;
3473 	uint32_t maxmtu = 0;
3474 
3475 	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
3476 
3477 	if (inc->inc_flags & INC_IPV6MINMTU)
3478 		return (IPV6_MMTU);
3479 
3480 	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
3481 		in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
3482 		nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0);
3483 		if (nh == NULL)
3484 			return (0);
3485 
3486 		ifp = nh->nh_ifp;
3487 		maxmtu = nh->nh_mtu;
3488 
3489 		/* Report additional interface capabilities. */
3490 		if (cap != NULL) {
3491 			if (ifp->if_capenable & IFCAP_TSO6 &&
3492 			    ifp->if_hwassist & CSUM_TSO) {
3493 				cap->ifcap |= CSUM_TSO;
3494 				cap->tsomax = ifp->if_hw_tsomax;
3495 				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3496 				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3497 			}
3498 		}
3499 	}
3500 
3501 	return (maxmtu);
3502 }
3503 
3504 /*
3505  * Handle setsockopt(IPV6_USE_MIN_MTU) by a TCP stack.
3506  *
3507  * XXXGL: we are updating inpcb here with INC_IPV6MINMTU flag.
3508  * The right place to do that is ip6_setpktopt() that has just been
3509  * executed.  By the way it just filled ip6po_minmtu for us.
3510  */
3511 void
3512 tcp6_use_min_mtu(struct tcpcb *tp)
3513 {
3514 	struct inpcb *inp = tp->t_inpcb;
3515 
3516 	INP_WLOCK_ASSERT(inp);
3517 	/*
3518 	 * In case of the IPV6_USE_MIN_MTU socket
3519 	 * option, the INC_IPV6MINMTU flag is set in
3520 	 * order to announce a corresponding MSS
3521 	 * during the initial handshake.  If the TCP
3522 	 * connection is not in the front states, just
3523 	 * reduce the MSS being used.  This avoids the
3524 	 * sending of TCP segments which will be
3525 	 * fragmented at the IPv6 layer.
3526 	 */
3527 	inp->inp_inc.inc_flags |= INC_IPV6MINMTU;
3528 	if ((tp->t_state >= TCPS_SYN_SENT) &&
3529 	    (inp->inp_inc.inc_flags & INC_ISIPV6)) {
3530 		struct ip6_pktopts *opt;
3531 
3532 		opt = inp->in6p_outputopts;
3533 		if (opt != NULL && opt->ip6po_minmtu == IP6PO_MINMTU_ALL &&
3534 		    tp->t_maxseg > TCP6_MSS)
3535 			tp->t_maxseg = TCP6_MSS;
3536 	}
3537 }
3538 #endif /* INET6 */
3539 
3540 /*
3541  * Calculate the effective SMSS per RFC 5681 definition for a given TCP
3542  * connection at its current state, taking SACK and other options into account.
3543  */
3544 u_int
3545 tcp_maxseg(const struct tcpcb *tp)
3546 {
3547 	u_int optlen;
3548 
3549 	if (tp->t_flags & TF_NOOPT)
3550 		return (tp->t_maxseg);
3551 
3552 	/*
3553 	 * Here we have a simplified version of the tcp_addoptions() code,
3554 	 * without a proper loop, and with most of the padding hardcoded.
3555 	 * We might make mistakes with padding here in some edge cases,
3556 	 * but this is harmless, since the result of tcp_maxseg() is used
3557 	 * only in cwnd and ssthresh estimations.
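 *
 * Worked example: on an established IPv4 connection with a t_maxseg of
 * 1460 and timestamps in use, optlen is TCPOLEN_TSTAMP_APPA (12) and
 * tcp_maxseg() returns 1448; with three SACK blocks also pending,
 * 2 + 3 * 8 = 26 further bytes pad to 28, for a result of 1420.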
3558 	 */
3559 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3560 		if (tp->t_flags & TF_RCVD_TSTMP)
3561 			optlen = TCPOLEN_TSTAMP_APPA;
3562 		else
3563 			optlen = 0;
3564 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3565 		if (tp->t_flags & TF_SIGNATURE)
3566 			optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3567 #endif
3568 		if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
3569 			optlen += TCPOLEN_SACKHDR;
3570 			optlen += tp->rcv_numsacks * TCPOLEN_SACK;
3571 			optlen = PADTCPOLEN(optlen);
3572 		}
3573 	} else {
3574 		if (tp->t_flags & TF_REQ_TSTMP)
3575 			optlen = TCPOLEN_TSTAMP_APPA;
3576 		else
3577 			optlen = PADTCPOLEN(TCPOLEN_MAXSEG);
3578 		if (tp->t_flags & TF_REQ_SCALE)
3579 			optlen += PADTCPOLEN(TCPOLEN_WINDOW);
3580 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3581 		if (tp->t_flags & TF_SIGNATURE)
3582 			optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3583 #endif
3584 		if (tp->t_flags & TF_SACK_PERMIT)
3585 			optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED);
3586 	}
3587 
3588 	optlen = min(optlen, TCP_MAXOLEN);
3589 	return (tp->t_maxseg - optlen);
3590 }
3591 
3592 
3593 u_int
3594 tcp_fixed_maxseg(const struct tcpcb *tp)
3595 {
3596 	int optlen;
3597 
3598 	if (tp->t_flags & TF_NOOPT)
3599 		return (tp->t_maxseg);
3600 
3601 	/*
3602 	 * Here we have a simplified version of the tcp_addoptions() code,
3603 	 * without a proper loop, and with most of the padding hardcoded.
3604 	 * We only consider the fixed options that we would send on every
3605 	 * segment, i.e. SACK is not considered.  This is important
3606 	 * for cc modules to figure out what the modulo of the
3607 	 * cwnd should be.
3608 	 */
3609 #define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
3610 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3611 		if (tp->t_flags & TF_RCVD_TSTMP)
3612 			optlen = TCPOLEN_TSTAMP_APPA;
3613 		else
3614 			optlen = 0;
3615 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3616 		if (tp->t_flags & TF_SIGNATURE)
3617 			optlen += PAD(TCPOLEN_SIGNATURE);
3618 #endif
3619 	} else {
3620 		if (tp->t_flags & TF_REQ_TSTMP)
3621 			optlen = TCPOLEN_TSTAMP_APPA;
3622 		else
3623 			optlen = PAD(TCPOLEN_MAXSEG);
3624 		if (tp->t_flags & TF_REQ_SCALE)
3625 			optlen += PAD(TCPOLEN_WINDOW);
3626 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3627 		if (tp->t_flags & TF_SIGNATURE)
3628 			optlen += PAD(TCPOLEN_SIGNATURE);
3629 #endif
3630 		if (tp->t_flags & TF_SACK_PERMIT)
3631 			optlen += PAD(TCPOLEN_SACK_PERMITTED);
3632 	}
3633 #undef PAD
3634 	optlen = min(optlen, TCP_MAXOLEN);
3635 	return (tp->t_maxseg - optlen);
3636 }
3637 
3638 
3639 
3640 static int
3641 sysctl_drop(SYSCTL_HANDLER_ARGS)
3642 {
3643 	/* addrs[0] is a foreign socket, addrs[1] is a local one.
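 *
 * tcpdrop(8) is the usual consumer; a hypothetical caller sketch:
 *
 *	struct sockaddr_storage addrs[2];	(foreign, then local)
 *
 *	if (sysctlbyname("net.inet.tcp.drop", NULL, NULL,
 *	    addrs, sizeof(addrs)) == -1)
 *		err(1, "sysctl net.inet.tcp.drop");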
*/ 3644 struct sockaddr_storage addrs[2]; 3645 struct inpcb *inp; 3646 struct tcpcb *tp; 3647 struct tcptw *tw; 3648 #ifdef INET 3649 struct sockaddr_in *fin = NULL, *lin = NULL; 3650 #endif 3651 struct epoch_tracker et; 3652 #ifdef INET6 3653 struct sockaddr_in6 *fin6, *lin6; 3654 #endif 3655 int error; 3656 3657 inp = NULL; 3658 #ifdef INET6 3659 fin6 = lin6 = NULL; 3660 #endif 3661 error = 0; 3662 3663 if (req->oldptr != NULL || req->oldlen != 0) 3664 return (EINVAL); 3665 if (req->newptr == NULL) 3666 return (EPERM); 3667 if (req->newlen < sizeof(addrs)) 3668 return (ENOMEM); 3669 error = SYSCTL_IN(req, &addrs, sizeof(addrs)); 3670 if (error) 3671 return (error); 3672 3673 switch (addrs[0].ss_family) { 3674 #ifdef INET6 3675 case AF_INET6: 3676 fin6 = (struct sockaddr_in6 *)&addrs[0]; 3677 lin6 = (struct sockaddr_in6 *)&addrs[1]; 3678 if (fin6->sin6_len != sizeof(struct sockaddr_in6) || 3679 lin6->sin6_len != sizeof(struct sockaddr_in6)) 3680 return (EINVAL); 3681 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) { 3682 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) 3683 return (EINVAL); 3684 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]); 3685 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]); 3686 #ifdef INET 3687 fin = (struct sockaddr_in *)&addrs[0]; 3688 lin = (struct sockaddr_in *)&addrs[1]; 3689 #endif 3690 break; 3691 } 3692 error = sa6_embedscope(fin6, V_ip6_use_defzone); 3693 if (error) 3694 return (error); 3695 error = sa6_embedscope(lin6, V_ip6_use_defzone); 3696 if (error) 3697 return (error); 3698 break; 3699 #endif 3700 #ifdef INET 3701 case AF_INET: 3702 fin = (struct sockaddr_in *)&addrs[0]; 3703 lin = (struct sockaddr_in *)&addrs[1]; 3704 if (fin->sin_len != sizeof(struct sockaddr_in) || 3705 lin->sin_len != sizeof(struct sockaddr_in)) 3706 return (EINVAL); 3707 break; 3708 #endif 3709 default: 3710 return (EINVAL); 3711 } 3712 NET_EPOCH_ENTER(et); 3713 switch (addrs[0].ss_family) { 3714 #ifdef INET6 3715 case AF_INET6: 3716 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr, 3717 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 3718 INPLOOKUP_WLOCKPCB, NULL); 3719 break; 3720 #endif 3721 #ifdef INET 3722 case AF_INET: 3723 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port, 3724 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL); 3725 break; 3726 #endif 3727 } 3728 if (inp != NULL) { 3729 if (inp->inp_flags & INP_TIMEWAIT) { 3730 /* 3731 * XXXRW: There currently exists a state where an 3732 * inpcb is present, but its timewait state has been 3733 * discarded. For now, don't allow dropping of this 3734 * type of inpcb. 
3735 */ 3736 tw = intotw(inp); 3737 if (tw != NULL) 3738 tcp_twclose(tw, 0); 3739 else 3740 INP_WUNLOCK(inp); 3741 } else if ((inp->inp_flags & INP_DROPPED) == 0 && 3742 !SOLISTENING(inp->inp_socket)) { 3743 tp = intotcpcb(inp); 3744 tp = tcp_drop(tp, ECONNABORTED); 3745 if (tp != NULL) 3746 INP_WUNLOCK(inp); 3747 } else 3748 INP_WUNLOCK(inp); 3749 } else 3750 error = ESRCH; 3751 NET_EPOCH_EXIT(et); 3752 return (error); 3753 } 3754 3755 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop, 3756 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3757 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "", 3758 "Drop TCP connection"); 3759 3760 static int 3761 tcp_sysctl_setsockopt(SYSCTL_HANDLER_ARGS) 3762 { 3763 return (sysctl_setsockopt(oidp, arg1, arg2, req, &V_tcbinfo, 3764 &tcp_ctloutput_set)); 3765 } 3766 3767 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, setsockopt, 3768 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3769 CTLFLAG_MPSAFE, NULL, 0, tcp_sysctl_setsockopt, "", 3770 "Set socket option for TCP endpoint"); 3771 3772 #ifdef KERN_TLS 3773 static int 3774 sysctl_switch_tls(SYSCTL_HANDLER_ARGS) 3775 { 3776 /* addrs[0] is a foreign socket, addrs[1] is a local one. */ 3777 struct sockaddr_storage addrs[2]; 3778 struct inpcb *inp; 3779 #ifdef INET 3780 struct sockaddr_in *fin = NULL, *lin = NULL; 3781 #endif 3782 struct epoch_tracker et; 3783 #ifdef INET6 3784 struct sockaddr_in6 *fin6, *lin6; 3785 #endif 3786 int error; 3787 3788 inp = NULL; 3789 #ifdef INET6 3790 fin6 = lin6 = NULL; 3791 #endif 3792 error = 0; 3793 3794 if (req->oldptr != NULL || req->oldlen != 0) 3795 return (EINVAL); 3796 if (req->newptr == NULL) 3797 return (EPERM); 3798 if (req->newlen < sizeof(addrs)) 3799 return (ENOMEM); 3800 error = SYSCTL_IN(req, &addrs, sizeof(addrs)); 3801 if (error) 3802 return (error); 3803 3804 switch (addrs[0].ss_family) { 3805 #ifdef INET6 3806 case AF_INET6: 3807 fin6 = (struct sockaddr_in6 *)&addrs[0]; 3808 lin6 = (struct sockaddr_in6 *)&addrs[1]; 3809 if (fin6->sin6_len != sizeof(struct sockaddr_in6) || 3810 lin6->sin6_len != sizeof(struct sockaddr_in6)) 3811 return (EINVAL); 3812 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) { 3813 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) 3814 return (EINVAL); 3815 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]); 3816 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]); 3817 #ifdef INET 3818 fin = (struct sockaddr_in *)&addrs[0]; 3819 lin = (struct sockaddr_in *)&addrs[1]; 3820 #endif 3821 break; 3822 } 3823 error = sa6_embedscope(fin6, V_ip6_use_defzone); 3824 if (error) 3825 return (error); 3826 error = sa6_embedscope(lin6, V_ip6_use_defzone); 3827 if (error) 3828 return (error); 3829 break; 3830 #endif 3831 #ifdef INET 3832 case AF_INET: 3833 fin = (struct sockaddr_in *)&addrs[0]; 3834 lin = (struct sockaddr_in *)&addrs[1]; 3835 if (fin->sin_len != sizeof(struct sockaddr_in) || 3836 lin->sin_len != sizeof(struct sockaddr_in)) 3837 return (EINVAL); 3838 break; 3839 #endif 3840 default: 3841 return (EINVAL); 3842 } 3843 NET_EPOCH_ENTER(et); 3844 switch (addrs[0].ss_family) { 3845 #ifdef INET6 3846 case AF_INET6: 3847 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr, 3848 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 3849 INPLOOKUP_WLOCKPCB, NULL); 3850 break; 3851 #endif 3852 #ifdef INET 3853 case AF_INET: 3854 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port, 3855 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL); 3856 break; 3857 #endif 3858 } 3859 NET_EPOCH_EXIT(et); 3860 if (inp != NULL) { 3861 if 
static int
tcp_sysctl_setsockopt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_setsockopt(oidp, arg1, arg2, req, &V_tcbinfo,
	    &tcp_ctloutput_set));
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, setsockopt,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
    CTLFLAG_MPSAFE, NULL, 0, tcp_sysctl_setsockopt, "",
    "Set socket option for TCP endpoint");

#ifdef KERN_TLS
static int
sysctl_switch_tls(SYSCTL_HANDLER_ARGS)
{
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];
	struct inpcb *inp;
#ifdef INET
	struct sockaddr_in *fin = NULL, *lin = NULL;
#endif
	struct epoch_tracker et;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
#endif
	int error;

	inp = NULL;
#ifdef INET6
	fin6 = lin6 = NULL;
#endif
	error = 0;

	if (req->oldptr != NULL || req->oldlen != 0)
		return (EINVAL);
	if (req->newptr == NULL)
		return (EPERM);
	if (req->newlen < sizeof(addrs))
		return (ENOMEM);
	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
	if (error)
		return (error);

	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
				return (EINVAL);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
#ifdef INET
			fin = (struct sockaddr_in *)&addrs[0];
			lin = (struct sockaddr_in *)&addrs[1];
#endif
			break;
		}
		error = sa6_embedscope(fin6, V_ip6_use_defzone);
		if (error)
			return (error);
		error = sa6_embedscope(lin6, V_ip6_use_defzone);
		if (error)
			return (error);
		break;
#endif
#ifdef INET
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);
		break;
#endif
	default:
		return (EINVAL);
	}
	NET_EPOCH_ENTER(et);
	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
		    fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
		    INPLOOKUP_WLOCKPCB, NULL);
		break;
#endif
#ifdef INET
	case AF_INET:
		inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
		    lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
		break;
#endif
	}
	NET_EPOCH_EXIT(et);
	if (inp != NULL) {
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) != 0 ||
		    inp->inp_socket == NULL) {
			error = ECONNRESET;
			INP_WUNLOCK(inp);
		} else {
			struct socket *so;

			so = inp->inp_socket;
			soref(so);
			error = ktls_set_tx_mode(so,
			    arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET);
			INP_WUNLOCK(inp);
			sorele(so);
		}
	} else
		error = ESRCH;
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
    CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "",
    "Switch TCP connection to SW TLS");
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
    CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "",
    "Switch TCP connection to ifnet TLS");
#endif

/*
 * Generate a standardized TCP log line for use throughout the
 * tcp subsystem.  Memory allocation is done with M_NOWAIT to
 * allow use in interrupt context.
 *
 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
 * NB: The function may return NULL if memory allocation failed.
 *
 * Due to header inclusion and ordering limitations the struct ip
 * and ip6_hdr pointers have to be passed as void pointers.
 */
char *
tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
    const void *ip6hdr)
{

	/* Is logging enabled? */
	if (V_tcp_log_in_vain == 0)
		return (NULL);

	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
}
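/*
 * A hypothetical call site for the logging helpers (a sketch only; "inp"
 * and "th" stand for whatever connection state the caller holds):
 *
 *	char *s;
 *
 *	if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL)) != NULL) {
 *		log(LOG_DEBUG, "%s; %s: connection refused\n", s, __func__);
 *		free(s, M_TCPLOG);
 *	}
 */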
char *
tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
    const void *ip6hdr)
{

	/* Is logging enabled? */
	if (tcp_log_debug == 0)
		return (NULL);

	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
}

static char *
tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
    const void *ip6hdr)
{
	char *s, *sp;
	size_t size;
#ifdef INET
	const struct ip *ip = (const struct ip *)ip4hdr;
#endif
#ifdef INET6
	const struct ip6_hdr *ip6 = (const struct ip6_hdr *)ip6hdr;
#endif /* INET6 */

	/*
	 * The log line looks like this:
	 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
	 */
	size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
	    sizeof(PRINT_TH_FLAGS) + 1 +
#ifdef INET6
	    2 * INET6_ADDRSTRLEN;
#else
	    2 * INET_ADDRSTRLEN;
#endif /* INET6 */

	s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
	if (s == NULL)
		return (NULL);

	strcat(s, "TCP: [");
	sp = s + strlen(s);

	if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
		inet_ntoa_r(inc->inc_faddr, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
		sp = s + strlen(s);
		inet_ntoa_r(inc->inc_laddr, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
#ifdef INET6
	} else if (inc) {
		ip6_sprintf(sp, &inc->inc6_faddr);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
		sp = s + strlen(s);
		ip6_sprintf(sp, &inc->inc6_laddr);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
	} else if (ip6 && th) {
		ip6_sprintf(sp, &ip6->ip6_src);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
		sp = s + strlen(s);
		ip6_sprintf(sp, &ip6->ip6_dst);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(th->th_dport));
#endif /* INET6 */
#ifdef INET
	} else if (ip && th) {
		inet_ntoa_r(ip->ip_src, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
		sp = s + strlen(s);
		inet_ntoa_r(ip->ip_dst, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(th->th_dport));
#endif /* INET */
	} else {
		free(s, M_TCPLOG);
		return (NULL);
	}
	sp = s + strlen(s);
	if (th)
		sprintf(sp, " tcpflags 0x%b", tcp_get_flags(th), PRINT_TH_FLAGS);
	if (*(s + size - 1) != '\0')
		panic("%s: string too long", __func__);
	return (s);
}

/*
 * A subroutine which makes it easy to track TCP state changes with DTrace.
 * This function shouldn't be called for t_state initializations that don't
 * correspond to actual TCP state transitions.
 */
void
tcp_state_change(struct tcpcb *tp, int newstate)
{
#if defined(KDTRACE_HOOKS)
	int pstate = tp->t_state;
#endif

	TCPSTATES_DEC(tp->t_state);
	TCPSTATES_INC(newstate);
	tp->t_state = newstate;
	TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
}
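/*
 * The probe above can be consumed from userland, e.g. (a sketch; the
 * tcp_state_string translation table is assumed to come from the
 * system's tcp.d support file):
 *
 *	dtrace -n 'tcp:::state-change {
 *		printf("%s -> %s",
 *		    tcp_state_string[args[5]->tcps_state],
 *		    tcp_state_string[args[3]->tcps_state]);
 *	}'
 */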
/*
 * Create an external-format (``xtcpcb'') structure using the information in
 * the kernel-format tcpcb structure pointed to by tp.  This is done to
 * reduce the spew of irrelevant information over this interface, to isolate
 * user code from changes in the kernel structure, and potentially to provide
 * information-hiding if we decide that some of this information should be
 * hidden from users.
 */
void
tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct tcptw *tw = intotw(inp);
	sbintime_t now;

	bzero(xt, sizeof(*xt));
	if (inp->inp_flags & INP_TIMEWAIT) {
		xt->t_state = TCPS_TIME_WAIT;
		xt->xt_encaps_port = tw->t_port;
	} else {
		xt->t_state = tp->t_state;
		xt->t_logstate = tp->t_logstate;
		xt->t_flags = tp->t_flags;
		xt->t_sndzerowin = tp->t_sndzerowin;
		xt->t_sndrexmitpack = tp->t_sndrexmitpack;
		xt->t_rcvoopack = tp->t_rcvoopack;
		xt->t_rcv_wnd = tp->rcv_wnd;
		xt->t_snd_wnd = tp->snd_wnd;
		xt->t_snd_cwnd = tp->snd_cwnd;
		xt->t_snd_ssthresh = tp->snd_ssthresh;
		xt->t_dsack_bytes = tp->t_dsack_bytes;
		xt->t_dsack_tlp_bytes = tp->t_dsack_tlp_bytes;
		xt->t_dsack_pack = tp->t_dsack_pack;
		xt->t_maxseg = tp->t_maxseg;
		/*
		 * Bit 0 reports classic ECN, bit 1 ACE negotiation.  Both
		 * conditionals must be parenthesized: "+" binds tighter
		 * than "?:", so without them the mask is miscomputed.
		 */
		xt->xt_ecn = ((tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0) +
		    ((tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0);

		now = getsbinuptime();
#define	COPYTIMER(ttt)	do {						\
		if (callout_active(&tp->t_timers->ttt))			\
			xt->ttt = (tp->t_timers->ttt.c_time - now) /	\
			    SBT_1MS;					\
		else							\
			xt->ttt = 0;					\
} while (0)
		COPYTIMER(tt_delack);
		COPYTIMER(tt_rexmt);
		COPYTIMER(tt_persist);
		COPYTIMER(tt_keep);
		COPYTIMER(tt_2msl);
#undef COPYTIMER
		xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;

		xt->xt_encaps_port = tp->t_port;
		bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
		    TCP_FUNCTION_NAME_LEN_MAX);
		bcopy(CC_ALGO(tp)->name, xt->xt_cc,
		    TCP_CA_NAME_MAX);
#ifdef TCP_BLACKBOX
		(void)tcp_log_get_id(tp, xt->xt_logid);
#endif
	}

	xt->xt_len = sizeof(struct xtcpcb);
	in_pcbtoxinpcb(inp, &xt->xt_inp);
	if (inp->inp_socket == NULL)
		xt->xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
}
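/*
 * Consumers such as netstat(1) read the exported records through the
 * net.inet.tcp.pcblist sysctl; a sketch of the userland side (buffer
 * sizing races and error handling omitted):
 *
 *	size_t len;
 *	char *buf;
 *
 *	sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0);
 *	// buf holds a struct xinpgen header followed by the
 *	// struct xtcpcb entries filled in by tcp_inptoxtp().
 */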
void
tcp_log_end_status(struct tcpcb *tp, uint8_t status)
{
	uint32_t bit, i;

	if ((tp == NULL) ||
	    (status > TCP_EI_STATUS_MAX_VALUE) ||
	    (status == 0)) {
		/* Invalid */
		return;
	}
	if (status > (sizeof(uint32_t) * 8)) {
		/* Should this be a KASSERT? */
		return;
	}
	bit = 1U << (status - 1);
	if (bit & tp->t_end_info_status) {
		/* Already logged. */
		return;
	}
	for (i = 0; i < TCP_END_BYTE_INFO; i++) {
		if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) {
			tp->t_end_info_bytes[i] = status;
			tp->t_end_info_status |= bit;
			break;
		}
	}
}

int
tcp_can_enable_pacing(void)
{

	if ((tcp_pacing_limit == -1) ||
	    (tcp_pacing_limit > number_of_tcp_connections_pacing)) {
		/*
		 * The check and the increment are not performed as one
		 * atomic operation, so the configured limit is only
		 * approximate under concurrent callers.
		 */
		atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1);
		shadow_num_connections = number_of_tcp_connections_pacing;
		return (1);
	} else {
		return (0);
	}
}

static uint8_t tcp_pacing_warning = 0;

void
tcp_decrement_paced_conn(void)
{
	uint32_t ret;

	ret = atomic_fetchadd_int(&number_of_tcp_connections_pacing, -1);
	shadow_num_connections = number_of_tcp_connections_pacing;
	KASSERT(ret != 0, ("tcp_paced_connection_exits -1 would cause wrap?"));
	if (ret == 0) {
		if (tcp_pacing_limit != -1) {
			printf("Warning: all pacing is now disabled, the paced connection count was decremented below zero!\n");
			tcp_pacing_limit = 0;
		} else if (tcp_pacing_warning == 0) {
			printf("Warning: the paced connection count is invalid, ignoring bogus decrement\n");
			tcp_pacing_warning = 1;
		}
	}
}
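/*
 * A pacing-capable TCP stack is expected to pair these calls (a sketch;
 * the per-connection bookkeeping is illustrative):
 *
 *	if (tcp_can_enable_pacing())
 *		... mark this connection as paced ...
 *	else
 *		... fall back to unpaced transmission ...
 *
 * and then call tcp_decrement_paced_conn() exactly once when a paced
 * connection is torn down, so the global count stays balanced.
 */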