1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_inet.h" 38 #include "opt_inet6.h" 39 #include "opt_ipsec.h" 40 #include "opt_kern_tls.h" 41 #include "opt_tcpdebug.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/arb.h> 46 #include <sys/callout.h> 47 #include <sys/eventhandler.h> 48 #ifdef TCP_HHOOK 49 #include <sys/hhook.h> 50 #endif 51 #include <sys/kernel.h> 52 #ifdef TCP_HHOOK 53 #include <sys/khelp.h> 54 #endif 55 #ifdef KERN_TLS 56 #include <sys/ktls.h> 57 #endif 58 #include <sys/qmath.h> 59 #include <sys/stats.h> 60 #include <sys/sysctl.h> 61 #include <sys/jail.h> 62 #include <sys/malloc.h> 63 #include <sys/refcount.h> 64 #include <sys/mbuf.h> 65 #ifdef INET6 66 #include <sys/domain.h> 67 #endif 68 #include <sys/priv.h> 69 #include <sys/proc.h> 70 #include <sys/sdt.h> 71 #include <sys/socket.h> 72 #include <sys/socketvar.h> 73 #include <sys/protosw.h> 74 #include <sys/random.h> 75 76 #include <vm/uma.h> 77 78 #include <net/route.h> 79 #include <net/route/nhop.h> 80 #include <net/if.h> 81 #include <net/if_var.h> 82 #include <net/vnet.h> 83 84 #include <netinet/in.h> 85 #include <netinet/in_fib.h> 86 #include <netinet/in_kdtrace.h> 87 #include <netinet/in_pcb.h> 88 #include <netinet/in_systm.h> 89 #include <netinet/in_var.h> 90 #include <netinet/ip.h> 91 #include <netinet/ip_icmp.h> 92 #include <netinet/ip_var.h> 93 #ifdef INET6 94 #include <netinet/icmp6.h> 95 #include <netinet/ip6.h> 96 #include <netinet6/in6_fib.h> 97 #include <netinet6/in6_pcb.h> 98 #include <netinet6/ip6_var.h> 99 #include <netinet6/scope6_var.h> 100 #include <netinet6/nd6.h> 101 #endif 102 103 #include <netinet/tcp.h> 104 #include <netinet/tcp_fsm.h> 105 #include <netinet/tcp_seq.h> 106 #include <netinet/tcp_timer.h> 107 #include <netinet/tcp_var.h> 108 #include <netinet/tcp_log_buf.h> 109 #include <netinet/tcp_syncache.h> 110 #include <netinet/tcp_hpts.h> 111 #include <netinet/cc/cc.h> 112 #ifdef INET6 113 #include <netinet6/tcp6_var.h> 114 #endif 115 #include <netinet/tcpip.h> 116 #include <netinet/tcp_fastopen.h> 117 #ifdef TCPPCAP 118 #include <netinet/tcp_pcap.h> 119 #endif 120 #ifdef TCPDEBUG 121 #include <netinet/tcp_debug.h> 122 #endif 123 #ifdef INET6 124 #include <netinet6/ip6protosw.h> 125 #endif 126 #ifdef TCP_OFFLOAD 127 #include <netinet/tcp_offload.h> 128 #endif 129 #include <netinet/udp.h> 130 #include <netinet/udp_var.h> 131 132 #include <netipsec/ipsec_support.h> 133 134 #include <machine/in_cksum.h> 135 #include <crypto/siphash/siphash.h> 136 137 #include <security/mac/mac_framework.h> 138 139 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS; 140 #ifdef INET6 141 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS; 142 #endif 143 144 #ifdef NETFLIX_EXP_DETECTION 145 /* Sack attack detection thresholds and such */ 146 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack, 147 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 148 "Sack Attack detection thresholds"); 149 int32_t tcp_force_detection = 0; 150 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection, 151 CTLFLAG_RW, 152 &tcp_force_detection, 0, 153 "Do we force detection even if the INP has it off?"); 154 int32_t tcp_sack_to_ack_thresh = 700; /* 70 % */ 155 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh, 156 CTLFLAG_RW, 157 &tcp_sack_to_ack_thresh, 700, 158 "Percentage of sacks to acks we must see above (10.1 percent is 101)?"); 159 int32_t tcp_sack_to_move_thresh = 600; /* 60 % */ 160 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh, 161 CTLFLAG_RW, 162 &tcp_sack_to_move_thresh, 600, 163 "Percentage of sack moves we must see above (10.1 percent is 101)"); 164 int32_t tcp_restoral_thresh = 650; /* 65 % (sack:2:ack -5%) */ 165 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh, 166 CTLFLAG_RW, 167 &tcp_restoral_thresh, 550, 168 "Percentage of sack to ack percentage we must see below to restore(10.1 percent is 101)"); 169 int32_t tcp_sad_decay_val = 800; 170 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per, 171 CTLFLAG_RW, 172 &tcp_sad_decay_val, 800, 173 "The decay percentage (10.1 percent equals 101 )"); 174 int32_t tcp_map_minimum = 500; 175 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps, 176 CTLFLAG_RW, 177 &tcp_map_minimum, 500, 178 "Number of Map enteries before we start detection"); 179 int32_t tcp_attack_on_turns_on_logging = 0; 180 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, attacks_logged, 181 CTLFLAG_RW, 182 &tcp_attack_on_turns_on_logging, 0, 183 "When we have a positive hit on attack, do we turn on logging?"); 184 int32_t tcp_sad_pacing_interval = 2000; 185 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int, 186 CTLFLAG_RW, 187 &tcp_sad_pacing_interval, 2000, 188 "What is the minimum pacing interval for a classified attacker?"); 189 190 int32_t tcp_sad_low_pps = 100; 191 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps, 192 CTLFLAG_RW, 193 &tcp_sad_low_pps, 100, 194 "What is the input pps that below which we do not decay?"); 195 #endif 196 uint32_t tcp_ack_war_time_window = 1000; 197 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_timewindow, 198 CTLFLAG_RW, 199 &tcp_ack_war_time_window, 1000, 200 "If the tcp_stack does ack-war prevention how many milliseconds are in its time window?"); 201 uint32_t tcp_ack_war_cnt = 5; 202 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_cnt, 203 CTLFLAG_RW, 204 &tcp_ack_war_cnt, 5, 205 "If the tcp_stack does ack-war prevention how many acks can be sent in its time window?"); 206 207 struct rwlock tcp_function_lock; 208 209 static int 210 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS) 211 { 212 int error, new; 213 214 new = V_tcp_mssdflt; 215 error = sysctl_handle_int(oidp, &new, 0, req); 216 if (error == 0 && req->newptr) { 217 if (new < TCP_MINMSS) 218 error = EINVAL; 219 else 220 V_tcp_mssdflt = new; 221 } 222 return (error); 223 } 224 225 SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, 226 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 227 &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I", 228 "Default TCP Maximum Segment Size"); 229 230 #ifdef INET6 231 static int 232 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS) 233 { 234 int error, new; 235 236 new = V_tcp_v6mssdflt; 237 error = sysctl_handle_int(oidp, &new, 0, req); 238 if (error == 0 && req->newptr) { 239 if (new < TCP_MINMSS) 240 error = EINVAL; 241 else 242 V_tcp_v6mssdflt = new; 243 } 244 return (error); 245 } 246 247 SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, 248 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 249 &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I", 250 "Default TCP Maximum Segment Size for IPv6"); 251 #endif /* INET6 */ 252 253 /* 254 * Minimum MSS we accept and use. This prevents DoS attacks where 255 * we are forced to a ridiculous low MSS like 20 and send hundreds 256 * of packets instead of one. The effect scales with the available 257 * bandwidth and quickly saturates the CPU and network interface 258 * with packet generation and sending. Set to zero to disable MINMSS 259 * checking. This setting prevents us from sending too small packets. 260 */ 261 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS; 262 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW, 263 &VNET_NAME(tcp_minmss), 0, 264 "Minimum TCP Maximum Segment Size"); 265 266 VNET_DEFINE(int, tcp_do_rfc1323) = 1; 267 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW, 268 &VNET_NAME(tcp_do_rfc1323), 0, 269 "Enable rfc1323 (high performance TCP) extensions"); 270 271 /* 272 * As of June 2021, several TCP stacks violate RFC 7323 from September 2014. 273 * Some stacks negotiate TS, but never send them after connection setup. Some 274 * stacks negotiate TS, but don't send them when sending keep-alive segments. 275 * These include modern widely deployed TCP stacks. 276 * Therefore tolerating violations for now... 277 */ 278 VNET_DEFINE(int, tcp_tolerate_missing_ts) = 1; 279 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tolerate_missing_ts, CTLFLAG_VNET | CTLFLAG_RW, 280 &VNET_NAME(tcp_tolerate_missing_ts), 0, 281 "Tolerate missing TCP timestamps"); 282 283 VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1; 284 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW, 285 &VNET_NAME(tcp_ts_offset_per_conn), 0, 286 "Initialize TCP timestamps per connection instead of per host pair"); 287 288 /* How many connections are pacing */ 289 static volatile uint32_t number_of_tcp_connections_pacing = 0; 290 static uint32_t shadow_num_connections = 0; 291 292 static int tcp_pacing_limit = 10000; 293 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pacing_limit, CTLFLAG_RW, 294 &tcp_pacing_limit, 1000, 295 "If the TCP stack does pacing, is there a limit (-1 = no, 0 = no pacing N = number of connections)"); 296 297 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pacing_count, CTLFLAG_RD, 298 &shadow_num_connections, 0, "Number of TCP connections being paced"); 299 300 static int tcp_log_debug = 0; 301 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW, 302 &tcp_log_debug, 0, "Log errors caused by incoming TCP segments"); 303 304 static int tcp_tcbhashsize; 305 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 306 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable"); 307 308 static int do_tcpdrain = 1; 309 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0, 310 "Enable tcp_drain routine for extra help when low on mbufs"); 311 312 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD, 313 &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs"); 314 315 VNET_DEFINE_STATIC(int, icmp_may_rst) = 1; 316 #define V_icmp_may_rst VNET(icmp_may_rst) 317 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW, 318 &VNET_NAME(icmp_may_rst), 0, 319 "Certain ICMP unreachable messages may abort connections in SYN_SENT"); 320 321 VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0; 322 #define V_tcp_isn_reseed_interval VNET(tcp_isn_reseed_interval) 323 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW, 324 &VNET_NAME(tcp_isn_reseed_interval), 0, 325 "Seconds between reseeding of ISN secret"); 326 327 static int tcp_soreceive_stream; 328 SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN, 329 &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets"); 330 331 VNET_DEFINE(uma_zone_t, sack_hole_zone); 332 #define V_sack_hole_zone VNET(sack_hole_zone) 333 VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0; /* unlimited */ 334 static int 335 sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS) 336 { 337 int error; 338 uint32_t new; 339 340 new = V_tcp_map_entries_limit; 341 error = sysctl_handle_int(oidp, &new, 0, req); 342 if (error == 0 && req->newptr) { 343 /* only allow "0" and value > minimum */ 344 if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT) 345 error = EINVAL; 346 else 347 V_tcp_map_entries_limit = new; 348 } 349 return (error); 350 } 351 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit, 352 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 353 &VNET_NAME(tcp_map_entries_limit), 0, 354 &sysctl_net_inet_tcp_map_limit_check, "IU", 355 "Total sendmap entries limit"); 356 357 VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0; /* unlimited */ 358 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW, 359 &VNET_NAME(tcp_map_split_limit), 0, 360 "Total sendmap split entries limit"); 361 362 #ifdef TCP_HHOOK 363 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]); 364 #endif 365 366 #define TS_OFFSET_SECRET_LENGTH SIPHASH_KEY_LENGTH 367 VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]); 368 #define V_ts_offset_secret VNET(ts_offset_secret) 369 370 static int tcp_default_fb_init(struct tcpcb *tp); 371 static void tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged); 372 static int tcp_default_handoff_ok(struct tcpcb *tp); 373 static struct inpcb *tcp_notify(struct inpcb *, int); 374 static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int); 375 static void tcp_mtudisc(struct inpcb *, int); 376 static char * tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, 377 void *ip4hdr, const void *ip6hdr); 378 379 static struct tcp_function_block tcp_def_funcblk = { 380 .tfb_tcp_block_name = "freebsd", 381 .tfb_tcp_output = tcp_output, 382 .tfb_tcp_do_segment = tcp_do_segment, 383 .tfb_tcp_ctloutput = tcp_default_ctloutput, 384 .tfb_tcp_handoff_ok = tcp_default_handoff_ok, 385 .tfb_tcp_fb_init = tcp_default_fb_init, 386 .tfb_tcp_fb_fini = tcp_default_fb_fini, 387 }; 388 389 static int tcp_fb_cnt = 0; 390 struct tcp_funchead t_functions; 391 static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk; 392 393 void 394 tcp_record_dsack(struct tcpcb *tp, tcp_seq start, tcp_seq end, int tlp) 395 { 396 TCPSTAT_INC(tcps_dsack_count); 397 tp->t_dsack_pack++; 398 if (tlp == 0) { 399 if (SEQ_GT(end, start)) { 400 tp->t_dsack_bytes += (end - start); 401 TCPSTAT_ADD(tcps_dsack_bytes, (end - start)); 402 } else { 403 tp->t_dsack_tlp_bytes += (start - end); 404 TCPSTAT_ADD(tcps_dsack_bytes, (start - end)); 405 } 406 } else { 407 if (SEQ_GT(end, start)) { 408 tp->t_dsack_bytes += (end - start); 409 TCPSTAT_ADD(tcps_dsack_tlp_bytes, (end - start)); 410 } else { 411 tp->t_dsack_tlp_bytes += (start - end); 412 TCPSTAT_ADD(tcps_dsack_tlp_bytes, (start - end)); 413 } 414 } 415 } 416 417 static struct tcp_function_block * 418 find_tcp_functions_locked(struct tcp_function_set *fs) 419 { 420 struct tcp_function *f; 421 struct tcp_function_block *blk=NULL; 422 423 TAILQ_FOREACH(f, &t_functions, tf_next) { 424 if (strcmp(f->tf_name, fs->function_set_name) == 0) { 425 blk = f->tf_fb; 426 break; 427 } 428 } 429 return(blk); 430 } 431 432 static struct tcp_function_block * 433 find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s) 434 { 435 struct tcp_function_block *rblk=NULL; 436 struct tcp_function *f; 437 438 TAILQ_FOREACH(f, &t_functions, tf_next) { 439 if (f->tf_fb == blk) { 440 rblk = blk; 441 if (s) { 442 *s = f; 443 } 444 break; 445 } 446 } 447 return (rblk); 448 } 449 450 struct tcp_function_block * 451 find_and_ref_tcp_functions(struct tcp_function_set *fs) 452 { 453 struct tcp_function_block *blk; 454 455 rw_rlock(&tcp_function_lock); 456 blk = find_tcp_functions_locked(fs); 457 if (blk) 458 refcount_acquire(&blk->tfb_refcnt); 459 rw_runlock(&tcp_function_lock); 460 return(blk); 461 } 462 463 struct tcp_function_block * 464 find_and_ref_tcp_fb(struct tcp_function_block *blk) 465 { 466 struct tcp_function_block *rblk; 467 468 rw_rlock(&tcp_function_lock); 469 rblk = find_tcp_fb_locked(blk, NULL); 470 if (rblk) 471 refcount_acquire(&rblk->tfb_refcnt); 472 rw_runlock(&tcp_function_lock); 473 return(rblk); 474 } 475 476 /* Find a matching alias for the given tcp_function_block. */ 477 int 478 find_tcp_function_alias(struct tcp_function_block *blk, 479 struct tcp_function_set *fs) 480 { 481 struct tcp_function *f; 482 int found; 483 484 found = 0; 485 rw_rlock(&tcp_function_lock); 486 TAILQ_FOREACH(f, &t_functions, tf_next) { 487 if ((f->tf_fb == blk) && 488 (strncmp(f->tf_name, blk->tfb_tcp_block_name, 489 TCP_FUNCTION_NAME_LEN_MAX) != 0)) { 490 /* Matching function block with different name. */ 491 strncpy(fs->function_set_name, f->tf_name, 492 TCP_FUNCTION_NAME_LEN_MAX); 493 found = 1; 494 break; 495 } 496 } 497 /* Null terminate the string appropriately. */ 498 if (found) { 499 fs->function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0'; 500 } else { 501 fs->function_set_name[0] = '\0'; 502 } 503 rw_runlock(&tcp_function_lock); 504 return (found); 505 } 506 507 static struct tcp_function_block * 508 find_and_ref_tcp_default_fb(void) 509 { 510 struct tcp_function_block *rblk; 511 512 rw_rlock(&tcp_function_lock); 513 rblk = tcp_func_set_ptr; 514 refcount_acquire(&rblk->tfb_refcnt); 515 rw_runlock(&tcp_function_lock); 516 return (rblk); 517 } 518 519 void 520 tcp_switch_back_to_default(struct tcpcb *tp) 521 { 522 struct tcp_function_block *tfb; 523 524 KASSERT(tp->t_fb != &tcp_def_funcblk, 525 ("%s: called by the built-in default stack", __func__)); 526 527 /* 528 * Release the old stack. This function will either find a new one 529 * or panic. 530 */ 531 if (tp->t_fb->tfb_tcp_fb_fini != NULL) 532 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 0); 533 refcount_release(&tp->t_fb->tfb_refcnt); 534 535 /* 536 * Now, we'll find a new function block to use. 537 * Start by trying the current user-selected 538 * default, unless this stack is the user-selected 539 * default. 540 */ 541 tfb = find_and_ref_tcp_default_fb(); 542 if (tfb == tp->t_fb) { 543 refcount_release(&tfb->tfb_refcnt); 544 tfb = NULL; 545 } 546 /* Does the stack accept this connection? */ 547 if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL && 548 (*tfb->tfb_tcp_handoff_ok)(tp)) { 549 refcount_release(&tfb->tfb_refcnt); 550 tfb = NULL; 551 } 552 /* Try to use that stack. */ 553 if (tfb != NULL) { 554 /* Initialize the new stack. If it succeeds, we are done. */ 555 tp->t_fb = tfb; 556 if (tp->t_fb->tfb_tcp_fb_init == NULL || 557 (*tp->t_fb->tfb_tcp_fb_init)(tp) == 0) 558 return; 559 560 /* 561 * Initialization failed. Release the reference count on 562 * the stack. 563 */ 564 refcount_release(&tfb->tfb_refcnt); 565 } 566 567 /* 568 * If that wasn't feasible, use the built-in default 569 * stack which is not allowed to reject anyone. 570 */ 571 tfb = find_and_ref_tcp_fb(&tcp_def_funcblk); 572 if (tfb == NULL) { 573 /* there always should be a default */ 574 panic("Can't refer to tcp_def_funcblk"); 575 } 576 if (tfb->tfb_tcp_handoff_ok != NULL) { 577 if ((*tfb->tfb_tcp_handoff_ok) (tp)) { 578 /* The default stack cannot say no */ 579 panic("Default stack rejects a new session?"); 580 } 581 } 582 tp->t_fb = tfb; 583 if (tp->t_fb->tfb_tcp_fb_init != NULL && 584 (*tp->t_fb->tfb_tcp_fb_init)(tp)) { 585 /* The default stack cannot fail */ 586 panic("Default stack initialization failed"); 587 } 588 } 589 590 static void 591 tcp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 592 const struct sockaddr *sa, void *ctx) 593 { 594 struct ip *iph; 595 #ifdef INET6 596 struct ip6_hdr *ip6; 597 #endif 598 struct udphdr *uh; 599 struct tcphdr *th; 600 int thlen; 601 uint16_t port; 602 603 TCPSTAT_INC(tcps_tunneled_pkts); 604 if ((m->m_flags & M_PKTHDR) == 0) { 605 /* Can't handle one that is not a pkt hdr */ 606 TCPSTAT_INC(tcps_tunneled_errs); 607 goto out; 608 } 609 thlen = sizeof(struct tcphdr); 610 if (m->m_len < off + sizeof(struct udphdr) + thlen && 611 (m = m_pullup(m, off + sizeof(struct udphdr) + thlen)) == NULL) { 612 TCPSTAT_INC(tcps_tunneled_errs); 613 goto out; 614 } 615 iph = mtod(m, struct ip *); 616 uh = (struct udphdr *)((caddr_t)iph + off); 617 th = (struct tcphdr *)(uh + 1); 618 thlen = th->th_off << 2; 619 if (m->m_len < off + sizeof(struct udphdr) + thlen) { 620 m = m_pullup(m, off + sizeof(struct udphdr) + thlen); 621 if (m == NULL) { 622 TCPSTAT_INC(tcps_tunneled_errs); 623 goto out; 624 } else { 625 iph = mtod(m, struct ip *); 626 uh = (struct udphdr *)((caddr_t)iph + off); 627 th = (struct tcphdr *)(uh + 1); 628 } 629 } 630 m->m_pkthdr.tcp_tun_port = port = uh->uh_sport; 631 bcopy(th, uh, m->m_len - off); 632 m->m_len -= sizeof(struct udphdr); 633 m->m_pkthdr.len -= sizeof(struct udphdr); 634 /* 635 * We use the same algorithm for 636 * both UDP and TCP for c-sum. So 637 * the code in tcp_input will skip 638 * the checksum. So we do nothing 639 * with the flag (m->m_pkthdr.csum_flags). 640 */ 641 switch (iph->ip_v) { 642 #ifdef INET 643 case IPVERSION: 644 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 645 tcp_input_with_port(&m, &off, IPPROTO_TCP, port); 646 break; 647 #endif 648 #ifdef INET6 649 case IPV6_VERSION >> 4: 650 ip6 = mtod(m, struct ip6_hdr *); 651 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 652 tcp6_input_with_port(&m, &off, IPPROTO_TCP, port); 653 break; 654 #endif 655 default: 656 goto out; 657 break; 658 } 659 return; 660 out: 661 m_freem(m); 662 } 663 664 static int 665 sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS) 666 { 667 int error=ENOENT; 668 struct tcp_function_set fs; 669 struct tcp_function_block *blk; 670 671 memset(&fs, 0, sizeof(fs)); 672 rw_rlock(&tcp_function_lock); 673 blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL); 674 if (blk) { 675 /* Found him */ 676 strcpy(fs.function_set_name, blk->tfb_tcp_block_name); 677 fs.pcbcnt = blk->tfb_refcnt; 678 } 679 rw_runlock(&tcp_function_lock); 680 error = sysctl_handle_string(oidp, fs.function_set_name, 681 sizeof(fs.function_set_name), req); 682 683 /* Check for error or no change */ 684 if (error != 0 || req->newptr == NULL) 685 return(error); 686 687 rw_wlock(&tcp_function_lock); 688 blk = find_tcp_functions_locked(&fs); 689 if ((blk == NULL) || 690 (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) { 691 error = ENOENT; 692 goto done; 693 } 694 tcp_func_set_ptr = blk; 695 done: 696 rw_wunlock(&tcp_function_lock); 697 return (error); 698 } 699 700 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default, 701 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 702 NULL, 0, sysctl_net_inet_default_tcp_functions, "A", 703 "Set/get the default TCP functions"); 704 705 static int 706 sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS) 707 { 708 int error, cnt, linesz; 709 struct tcp_function *f; 710 char *buffer, *cp; 711 size_t bufsz, outsz; 712 bool alias; 713 714 cnt = 0; 715 rw_rlock(&tcp_function_lock); 716 TAILQ_FOREACH(f, &t_functions, tf_next) { 717 cnt++; 718 } 719 rw_runlock(&tcp_function_lock); 720 721 bufsz = (cnt+2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1; 722 buffer = malloc(bufsz, M_TEMP, M_WAITOK); 723 724 error = 0; 725 cp = buffer; 726 727 linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D', 728 "Alias", "PCB count"); 729 cp += linesz; 730 bufsz -= linesz; 731 outsz = linesz; 732 733 rw_rlock(&tcp_function_lock); 734 TAILQ_FOREACH(f, &t_functions, tf_next) { 735 alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name); 736 linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n", 737 f->tf_fb->tfb_tcp_block_name, 738 (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ', 739 alias ? f->tf_name : "-", 740 f->tf_fb->tfb_refcnt); 741 if (linesz >= bufsz) { 742 error = EOVERFLOW; 743 break; 744 } 745 cp += linesz; 746 bufsz -= linesz; 747 outsz += linesz; 748 } 749 rw_runlock(&tcp_function_lock); 750 if (error == 0) 751 error = sysctl_handle_string(oidp, buffer, outsz + 1, req); 752 free(buffer, M_TEMP); 753 return (error); 754 } 755 756 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available, 757 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 758 NULL, 0, sysctl_net_inet_list_available, "A", 759 "list available TCP Function sets"); 760 761 VNET_DEFINE(int, tcp_udp_tunneling_port) = TCP_TUNNELING_PORT_DEFAULT; 762 763 #ifdef INET 764 VNET_DEFINE(struct socket *, udp4_tun_socket) = NULL; 765 #define V_udp4_tun_socket VNET(udp4_tun_socket) 766 #endif 767 #ifdef INET6 768 VNET_DEFINE(struct socket *, udp6_tun_socket) = NULL; 769 #define V_udp6_tun_socket VNET(udp6_tun_socket) 770 #endif 771 772 static void 773 tcp_over_udp_stop(void) 774 { 775 /* 776 * This function assumes sysctl caller holds inp_rinfo_lock() 777 * for writing! 778 */ 779 #ifdef INET 780 if (V_udp4_tun_socket != NULL) { 781 soclose(V_udp4_tun_socket); 782 V_udp4_tun_socket = NULL; 783 } 784 #endif 785 #ifdef INET6 786 if (V_udp6_tun_socket != NULL) { 787 soclose(V_udp6_tun_socket); 788 V_udp6_tun_socket = NULL; 789 } 790 #endif 791 } 792 793 static int 794 tcp_over_udp_start(void) 795 { 796 uint16_t port; 797 int ret; 798 #ifdef INET 799 struct sockaddr_in sin; 800 #endif 801 #ifdef INET6 802 struct sockaddr_in6 sin6; 803 #endif 804 /* 805 * This function assumes sysctl caller holds inp_info_rlock() 806 * for writing! 807 */ 808 port = V_tcp_udp_tunneling_port; 809 if (ntohs(port) == 0) { 810 /* Must have a port set */ 811 return (EINVAL); 812 } 813 #ifdef INET 814 if (V_udp4_tun_socket != NULL) { 815 /* Already running -- must stop first */ 816 return (EALREADY); 817 } 818 #endif 819 #ifdef INET6 820 if (V_udp6_tun_socket != NULL) { 821 /* Already running -- must stop first */ 822 return (EALREADY); 823 } 824 #endif 825 #ifdef INET 826 if ((ret = socreate(PF_INET, &V_udp4_tun_socket, 827 SOCK_DGRAM, IPPROTO_UDP, 828 curthread->td_ucred, curthread))) { 829 tcp_over_udp_stop(); 830 return (ret); 831 } 832 /* Call the special UDP hook. */ 833 if ((ret = udp_set_kernel_tunneling(V_udp4_tun_socket, 834 tcp_recv_udp_tunneled_packet, 835 tcp_ctlinput_viaudp, 836 NULL))) { 837 tcp_over_udp_stop(); 838 return (ret); 839 } 840 /* Ok, we have a socket, bind it to the port. */ 841 memset(&sin, 0, sizeof(struct sockaddr_in)); 842 sin.sin_len = sizeof(struct sockaddr_in); 843 sin.sin_family = AF_INET; 844 sin.sin_port = htons(port); 845 if ((ret = sobind(V_udp4_tun_socket, 846 (struct sockaddr *)&sin, curthread))) { 847 tcp_over_udp_stop(); 848 return (ret); 849 } 850 #endif 851 #ifdef INET6 852 if ((ret = socreate(PF_INET6, &V_udp6_tun_socket, 853 SOCK_DGRAM, IPPROTO_UDP, 854 curthread->td_ucred, curthread))) { 855 tcp_over_udp_stop(); 856 return (ret); 857 } 858 /* Call the special UDP hook. */ 859 if ((ret = udp_set_kernel_tunneling(V_udp6_tun_socket, 860 tcp_recv_udp_tunneled_packet, 861 tcp6_ctlinput_viaudp, 862 NULL))) { 863 tcp_over_udp_stop(); 864 return (ret); 865 } 866 /* Ok, we have a socket, bind it to the port. */ 867 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 868 sin6.sin6_len = sizeof(struct sockaddr_in6); 869 sin6.sin6_family = AF_INET6; 870 sin6.sin6_port = htons(port); 871 if ((ret = sobind(V_udp6_tun_socket, 872 (struct sockaddr *)&sin6, curthread))) { 873 tcp_over_udp_stop(); 874 return (ret); 875 } 876 #endif 877 return (0); 878 } 879 880 static int 881 sysctl_net_inet_tcp_udp_tunneling_port_check(SYSCTL_HANDLER_ARGS) 882 { 883 int error; 884 uint32_t old, new; 885 886 old = V_tcp_udp_tunneling_port; 887 new = old; 888 error = sysctl_handle_int(oidp, &new, 0, req); 889 if ((error == 0) && 890 (req->newptr != NULL)) { 891 if ((new < TCP_TUNNELING_PORT_MIN) || 892 (new > TCP_TUNNELING_PORT_MAX)) { 893 error = EINVAL; 894 } else { 895 V_tcp_udp_tunneling_port = new; 896 if (old != 0) { 897 tcp_over_udp_stop(); 898 } 899 if (new != 0) { 900 error = tcp_over_udp_start(); 901 } 902 } 903 } 904 return (error); 905 } 906 907 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_port, 908 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 909 &VNET_NAME(tcp_udp_tunneling_port), 910 0, &sysctl_net_inet_tcp_udp_tunneling_port_check, "IU", 911 "Tunneling port for tcp over udp"); 912 913 VNET_DEFINE(int, tcp_udp_tunneling_overhead) = TCP_TUNNELING_OVERHEAD_DEFAULT; 914 915 static int 916 sysctl_net_inet_tcp_udp_tunneling_overhead_check(SYSCTL_HANDLER_ARGS) 917 { 918 int error, new; 919 920 new = V_tcp_udp_tunneling_overhead; 921 error = sysctl_handle_int(oidp, &new, 0, req); 922 if (error == 0 && req->newptr) { 923 if ((new < TCP_TUNNELING_OVERHEAD_MIN) || 924 (new > TCP_TUNNELING_OVERHEAD_MAX)) 925 error = EINVAL; 926 else 927 V_tcp_udp_tunneling_overhead = new; 928 } 929 return (error); 930 } 931 932 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_overhead, 933 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 934 &VNET_NAME(tcp_udp_tunneling_overhead), 935 0, &sysctl_net_inet_tcp_udp_tunneling_overhead_check, "IU", 936 "MSS reduction when using tcp over udp"); 937 938 /* 939 * Exports one (struct tcp_function_info) for each alias/name. 940 */ 941 static int 942 sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS) 943 { 944 int cnt, error; 945 struct tcp_function *f; 946 struct tcp_function_info tfi; 947 948 /* 949 * We don't allow writes. 950 */ 951 if (req->newptr != NULL) 952 return (EINVAL); 953 954 /* 955 * Wire the old buffer so we can directly copy the functions to 956 * user space without dropping the lock. 957 */ 958 if (req->oldptr != NULL) { 959 error = sysctl_wire_old_buffer(req, 0); 960 if (error) 961 return (error); 962 } 963 964 /* 965 * Walk the list and copy out matching entries. If INVARIANTS 966 * is compiled in, also walk the list to verify the length of 967 * the list matches what we have recorded. 968 */ 969 rw_rlock(&tcp_function_lock); 970 971 cnt = 0; 972 #ifndef INVARIANTS 973 if (req->oldptr == NULL) { 974 cnt = tcp_fb_cnt; 975 goto skip_loop; 976 } 977 #endif 978 TAILQ_FOREACH(f, &t_functions, tf_next) { 979 #ifdef INVARIANTS 980 cnt++; 981 #endif 982 if (req->oldptr != NULL) { 983 bzero(&tfi, sizeof(tfi)); 984 tfi.tfi_refcnt = f->tf_fb->tfb_refcnt; 985 tfi.tfi_id = f->tf_fb->tfb_id; 986 (void)strlcpy(tfi.tfi_alias, f->tf_name, 987 sizeof(tfi.tfi_alias)); 988 (void)strlcpy(tfi.tfi_name, 989 f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name)); 990 error = SYSCTL_OUT(req, &tfi, sizeof(tfi)); 991 /* 992 * Don't stop on error, as that is the 993 * mechanism we use to accumulate length 994 * information if the buffer was too short. 995 */ 996 } 997 } 998 KASSERT(cnt == tcp_fb_cnt, 999 ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt)); 1000 #ifndef INVARIANTS 1001 skip_loop: 1002 #endif 1003 rw_runlock(&tcp_function_lock); 1004 if (req->oldptr == NULL) 1005 error = SYSCTL_OUT(req, NULL, 1006 (cnt + 1) * sizeof(struct tcp_function_info)); 1007 1008 return (error); 1009 } 1010 1011 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info, 1012 CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE, 1013 NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info", 1014 "List TCP function block name-to-ID mappings"); 1015 1016 /* 1017 * tfb_tcp_handoff_ok() function for the default stack. 1018 * Note that we'll basically try to take all comers. 1019 */ 1020 static int 1021 tcp_default_handoff_ok(struct tcpcb *tp) 1022 { 1023 1024 return (0); 1025 } 1026 1027 /* 1028 * tfb_tcp_fb_init() function for the default stack. 1029 * 1030 * This handles making sure we have appropriate timers set if you are 1031 * transitioning a socket that has some amount of setup done. 1032 * 1033 * The init() fuction from the default can *never* return non-zero i.e. 1034 * it is required to always succeed since it is the stack of last resort! 1035 */ 1036 static int 1037 tcp_default_fb_init(struct tcpcb *tp) 1038 { 1039 1040 struct socket *so; 1041 1042 INP_WLOCK_ASSERT(tp->t_inpcb); 1043 1044 KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT, 1045 ("%s: connection %p in unexpected state %d", __func__, tp, 1046 tp->t_state)); 1047 1048 /* 1049 * Nothing to do for ESTABLISHED or LISTEN states. And, we don't 1050 * know what to do for unexpected states (which includes TIME_WAIT). 1051 */ 1052 if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT) 1053 return (0); 1054 1055 /* 1056 * Make sure some kind of transmission timer is set if there is 1057 * outstanding data. 1058 */ 1059 so = tp->t_inpcb->inp_socket; 1060 if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) || 1061 tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) || 1062 tcp_timer_active(tp, TT_PERSIST))) { 1063 /* 1064 * If the session has established and it looks like it should 1065 * be in the persist state, set the persist timer. Otherwise, 1066 * set the retransmit timer. 1067 */ 1068 if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 && 1069 (int32_t)(tp->snd_nxt - tp->snd_una) < 1070 (int32_t)sbavail(&so->so_snd)) 1071 tcp_setpersist(tp); 1072 else 1073 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 1074 } 1075 1076 /* All non-embryonic sessions get a keepalive timer. */ 1077 if (!tcp_timer_active(tp, TT_KEEP)) 1078 tcp_timer_activate(tp, TT_KEEP, 1079 TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) : 1080 TP_KEEPINIT(tp)); 1081 1082 /* 1083 * Make sure critical variables are initialized 1084 * if transitioning while in Recovery. 1085 */ 1086 if IN_FASTRECOVERY(tp->t_flags) { 1087 if (tp->sackhint.recover_fs == 0) 1088 tp->sackhint.recover_fs = max(1, 1089 tp->snd_nxt - tp->snd_una); 1090 } 1091 1092 return (0); 1093 } 1094 1095 /* 1096 * tfb_tcp_fb_fini() function for the default stack. 1097 * 1098 * This changes state as necessary (or prudent) to prepare for another stack 1099 * to assume responsibility for the connection. 1100 */ 1101 static void 1102 tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged) 1103 { 1104 1105 INP_WLOCK_ASSERT(tp->t_inpcb); 1106 return; 1107 } 1108 1109 /* 1110 * Target size of TCP PCB hash tables. Must be a power of two. 1111 * 1112 * Note that this can be overridden by the kernel environment 1113 * variable net.inet.tcp.tcbhashsize 1114 */ 1115 #ifndef TCBHASHSIZE 1116 #define TCBHASHSIZE 0 1117 #endif 1118 1119 /* 1120 * XXX 1121 * Callouts should be moved into struct tcp directly. They are currently 1122 * separate because the tcpcb structure is exported to userland for sysctl 1123 * parsing purposes, which do not know about callouts. 1124 */ 1125 struct tcpcb_mem { 1126 struct tcpcb tcb; 1127 struct tcp_timer tt; 1128 struct cc_var ccv; 1129 #ifdef TCP_HHOOK 1130 struct osd osd; 1131 #endif 1132 }; 1133 1134 VNET_DEFINE_STATIC(uma_zone_t, tcpcb_zone); 1135 #define V_tcpcb_zone VNET(tcpcb_zone) 1136 1137 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers"); 1138 MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory"); 1139 1140 static struct mtx isn_mtx; 1141 1142 #define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF) 1143 #define ISN_LOCK() mtx_lock(&isn_mtx) 1144 #define ISN_UNLOCK() mtx_unlock(&isn_mtx) 1145 1146 /* 1147 * TCP initialization. 1148 */ 1149 static void 1150 tcp_zone_change(void *tag) 1151 { 1152 1153 uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets); 1154 uma_zone_set_max(V_tcpcb_zone, maxsockets); 1155 tcp_tw_zone_change(); 1156 } 1157 1158 static int 1159 tcp_inpcb_init(void *mem, int size, int flags) 1160 { 1161 struct inpcb *inp = mem; 1162 1163 INP_LOCK_INIT(inp, "inp", "tcpinp"); 1164 return (0); 1165 } 1166 1167 /* 1168 * Take a value and get the next power of 2 that doesn't overflow. 1169 * Used to size the tcp_inpcb hash buckets. 1170 */ 1171 static int 1172 maketcp_hashsize(int size) 1173 { 1174 int hashsize; 1175 1176 /* 1177 * auto tune. 1178 * get the next power of 2 higher than maxsockets. 1179 */ 1180 hashsize = 1 << fls(size); 1181 /* catch overflow, and just go one power of 2 smaller */ 1182 if (hashsize < size) { 1183 hashsize = 1 << (fls(size) - 1); 1184 } 1185 return (hashsize); 1186 } 1187 1188 static volatile int next_tcp_stack_id = 1; 1189 1190 /* 1191 * Register a TCP function block with the name provided in the names 1192 * array. (Note that this function does NOT automatically register 1193 * blk->tfb_tcp_block_name as a stack name. Therefore, you should 1194 * explicitly include blk->tfb_tcp_block_name in the list of names if 1195 * you wish to register the stack with that name.) 1196 * 1197 * Either all name registrations will succeed or all will fail. If 1198 * a name registration fails, the function will update the num_names 1199 * argument to point to the array index of the name that encountered 1200 * the failure. 1201 * 1202 * Returns 0 on success, or an error code on failure. 1203 */ 1204 int 1205 register_tcp_functions_as_names(struct tcp_function_block *blk, int wait, 1206 const char *names[], int *num_names) 1207 { 1208 struct tcp_function *n; 1209 struct tcp_function_set fs; 1210 int error, i; 1211 1212 KASSERT(names != NULL && *num_names > 0, 1213 ("%s: Called with 0-length name list", __func__)); 1214 KASSERT(names != NULL, ("%s: Called with NULL name list", __func__)); 1215 KASSERT(rw_initialized(&tcp_function_lock), 1216 ("%s: called too early", __func__)); 1217 1218 if ((blk->tfb_tcp_output == NULL) || 1219 (blk->tfb_tcp_do_segment == NULL) || 1220 (blk->tfb_tcp_ctloutput == NULL) || 1221 (strlen(blk->tfb_tcp_block_name) == 0)) { 1222 /* 1223 * These functions are required and you 1224 * need a name. 1225 */ 1226 *num_names = 0; 1227 return (EINVAL); 1228 } 1229 if (blk->tfb_tcp_timer_stop_all || 1230 blk->tfb_tcp_timer_activate || 1231 blk->tfb_tcp_timer_active || 1232 blk->tfb_tcp_timer_stop) { 1233 /* 1234 * If you define one timer function you 1235 * must have them all. 1236 */ 1237 if ((blk->tfb_tcp_timer_stop_all == NULL) || 1238 (blk->tfb_tcp_timer_activate == NULL) || 1239 (blk->tfb_tcp_timer_active == NULL) || 1240 (blk->tfb_tcp_timer_stop == NULL)) { 1241 *num_names = 0; 1242 return (EINVAL); 1243 } 1244 } 1245 1246 if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) { 1247 *num_names = 0; 1248 return (EINVAL); 1249 } 1250 1251 refcount_init(&blk->tfb_refcnt, 0); 1252 blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1); 1253 for (i = 0; i < *num_names; i++) { 1254 n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait); 1255 if (n == NULL) { 1256 error = ENOMEM; 1257 goto cleanup; 1258 } 1259 n->tf_fb = blk; 1260 1261 (void)strlcpy(fs.function_set_name, names[i], 1262 sizeof(fs.function_set_name)); 1263 rw_wlock(&tcp_function_lock); 1264 if (find_tcp_functions_locked(&fs) != NULL) { 1265 /* Duplicate name space not allowed */ 1266 rw_wunlock(&tcp_function_lock); 1267 free(n, M_TCPFUNCTIONS); 1268 error = EALREADY; 1269 goto cleanup; 1270 } 1271 (void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name)); 1272 TAILQ_INSERT_TAIL(&t_functions, n, tf_next); 1273 tcp_fb_cnt++; 1274 rw_wunlock(&tcp_function_lock); 1275 } 1276 return(0); 1277 1278 cleanup: 1279 /* 1280 * Deregister the names we just added. Because registration failed 1281 * for names[i], we don't need to deregister that name. 1282 */ 1283 *num_names = i; 1284 rw_wlock(&tcp_function_lock); 1285 while (--i >= 0) { 1286 TAILQ_FOREACH(n, &t_functions, tf_next) { 1287 if (!strncmp(n->tf_name, names[i], 1288 TCP_FUNCTION_NAME_LEN_MAX)) { 1289 TAILQ_REMOVE(&t_functions, n, tf_next); 1290 tcp_fb_cnt--; 1291 n->tf_fb = NULL; 1292 free(n, M_TCPFUNCTIONS); 1293 break; 1294 } 1295 } 1296 } 1297 rw_wunlock(&tcp_function_lock); 1298 return (error); 1299 } 1300 1301 /* 1302 * Register a TCP function block using the name provided in the name 1303 * argument. 1304 * 1305 * Returns 0 on success, or an error code on failure. 1306 */ 1307 int 1308 register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name, 1309 int wait) 1310 { 1311 const char *name_list[1]; 1312 int num_names, rv; 1313 1314 num_names = 1; 1315 if (name != NULL) 1316 name_list[0] = name; 1317 else 1318 name_list[0] = blk->tfb_tcp_block_name; 1319 rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names); 1320 return (rv); 1321 } 1322 1323 /* 1324 * Register a TCP function block using the name defined in 1325 * blk->tfb_tcp_block_name. 1326 * 1327 * Returns 0 on success, or an error code on failure. 1328 */ 1329 int 1330 register_tcp_functions(struct tcp_function_block *blk, int wait) 1331 { 1332 1333 return (register_tcp_functions_as_name(blk, NULL, wait)); 1334 } 1335 1336 /* 1337 * Deregister all names associated with a function block. This 1338 * functionally removes the function block from use within the system. 1339 * 1340 * When called with a true quiesce argument, mark the function block 1341 * as being removed so no more stacks will use it and determine 1342 * whether the removal would succeed. 1343 * 1344 * When called with a false quiesce argument, actually attempt the 1345 * removal. 1346 * 1347 * When called with a force argument, attempt to switch all TCBs to 1348 * use the default stack instead of returning EBUSY. 1349 * 1350 * Returns 0 on success (or if the removal would succeed, or an error 1351 * code on failure. 1352 */ 1353 int 1354 deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce, 1355 bool force) 1356 { 1357 struct tcp_function *f; 1358 1359 if (blk == &tcp_def_funcblk) { 1360 /* You can't un-register the default */ 1361 return (EPERM); 1362 } 1363 rw_wlock(&tcp_function_lock); 1364 if (blk == tcp_func_set_ptr) { 1365 /* You can't free the current default */ 1366 rw_wunlock(&tcp_function_lock); 1367 return (EBUSY); 1368 } 1369 /* Mark the block so no more stacks can use it. */ 1370 blk->tfb_flags |= TCP_FUNC_BEING_REMOVED; 1371 /* 1372 * If TCBs are still attached to the stack, attempt to switch them 1373 * to the default stack. 1374 */ 1375 if (force && blk->tfb_refcnt) { 1376 struct inpcb *inp; 1377 struct tcpcb *tp; 1378 VNET_ITERATOR_DECL(vnet_iter); 1379 1380 rw_wunlock(&tcp_function_lock); 1381 1382 VNET_LIST_RLOCK(); 1383 VNET_FOREACH(vnet_iter) { 1384 CURVNET_SET(vnet_iter); 1385 INP_INFO_WLOCK(&V_tcbinfo); 1386 CK_LIST_FOREACH(inp, V_tcbinfo.ipi_listhead, inp_list) { 1387 INP_WLOCK(inp); 1388 if (inp->inp_flags & INP_TIMEWAIT) { 1389 INP_WUNLOCK(inp); 1390 continue; 1391 } 1392 tp = intotcpcb(inp); 1393 if (tp == NULL || tp->t_fb != blk) { 1394 INP_WUNLOCK(inp); 1395 continue; 1396 } 1397 tcp_switch_back_to_default(tp); 1398 INP_WUNLOCK(inp); 1399 } 1400 INP_INFO_WUNLOCK(&V_tcbinfo); 1401 CURVNET_RESTORE(); 1402 } 1403 VNET_LIST_RUNLOCK(); 1404 1405 rw_wlock(&tcp_function_lock); 1406 } 1407 if (blk->tfb_refcnt) { 1408 /* TCBs still attached. */ 1409 rw_wunlock(&tcp_function_lock); 1410 return (EBUSY); 1411 } 1412 if (quiesce) { 1413 /* Skip removal. */ 1414 rw_wunlock(&tcp_function_lock); 1415 return (0); 1416 } 1417 /* Remove any function names that map to this function block. */ 1418 while (find_tcp_fb_locked(blk, &f) != NULL) { 1419 TAILQ_REMOVE(&t_functions, f, tf_next); 1420 tcp_fb_cnt--; 1421 f->tf_fb = NULL; 1422 free(f, M_TCPFUNCTIONS); 1423 } 1424 rw_wunlock(&tcp_function_lock); 1425 return (0); 1426 } 1427 1428 void 1429 tcp_init(void) 1430 { 1431 const char *tcbhash_tuneable; 1432 int hashsize; 1433 1434 tcbhash_tuneable = "net.inet.tcp.tcbhashsize"; 1435 1436 #ifdef TCP_HHOOK 1437 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, 1438 &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0) 1439 printf("%s: WARNING: unable to register helper hook\n", __func__); 1440 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, 1441 &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0) 1442 printf("%s: WARNING: unable to register helper hook\n", __func__); 1443 #endif 1444 #ifdef STATS 1445 if (tcp_stats_init()) 1446 printf("%s: WARNING: unable to initialise TCP stats\n", 1447 __func__); 1448 #endif 1449 hashsize = TCBHASHSIZE; 1450 TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize); 1451 if (hashsize == 0) { 1452 /* 1453 * Auto tune the hash size based on maxsockets. 1454 * A perfect hash would have a 1:1 mapping 1455 * (hashsize = maxsockets) however it's been 1456 * suggested that O(2) average is better. 1457 */ 1458 hashsize = maketcp_hashsize(maxsockets / 4); 1459 /* 1460 * Our historical default is 512, 1461 * do not autotune lower than this. 1462 */ 1463 if (hashsize < 512) 1464 hashsize = 512; 1465 if (bootverbose && IS_DEFAULT_VNET(curvnet)) 1466 printf("%s: %s auto tuned to %d\n", __func__, 1467 tcbhash_tuneable, hashsize); 1468 } 1469 /* 1470 * We require a hashsize to be a power of two. 1471 * Previously if it was not a power of two we would just reset it 1472 * back to 512, which could be a nasty surprise if you did not notice 1473 * the error message. 1474 * Instead what we do is clip it to the closest power of two lower 1475 * than the specified hash value. 1476 */ 1477 if (!powerof2(hashsize)) { 1478 int oldhashsize = hashsize; 1479 1480 hashsize = maketcp_hashsize(hashsize); 1481 /* prevent absurdly low value */ 1482 if (hashsize < 16) 1483 hashsize = 16; 1484 printf("%s: WARNING: TCB hash size not a power of 2, " 1485 "clipped from %d to %d.\n", __func__, oldhashsize, 1486 hashsize); 1487 } 1488 in_pcbinfo_init(&V_tcbinfo, "tcp", &V_tcb, hashsize, hashsize, 1489 "tcp_inpcb", tcp_inpcb_init, IPI_HASHFIELDS_4TUPLE); 1490 1491 /* 1492 * These have to be type stable for the benefit of the timers. 1493 */ 1494 V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem), 1495 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1496 uma_zone_set_max(V_tcpcb_zone, maxsockets); 1497 uma_zone_set_warning(V_tcpcb_zone, "kern.ipc.maxsockets limit reached"); 1498 1499 tcp_tw_init(); 1500 syncache_init(); 1501 tcp_hc_init(); 1502 1503 TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack); 1504 V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole), 1505 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1506 1507 tcp_fastopen_init(); 1508 1509 /* Skip initialization of globals for non-default instances. */ 1510 if (!IS_DEFAULT_VNET(curvnet)) 1511 return; 1512 1513 tcp_reass_global_init(); 1514 1515 /* XXX virtualize those bellow? */ 1516 tcp_delacktime = TCPTV_DELACK; 1517 tcp_keepinit = TCPTV_KEEP_INIT; 1518 tcp_keepidle = TCPTV_KEEP_IDLE; 1519 tcp_keepintvl = TCPTV_KEEPINTVL; 1520 tcp_maxpersistidle = TCPTV_KEEP_IDLE; 1521 tcp_msl = TCPTV_MSL; 1522 tcp_rexmit_initial = TCPTV_RTOBASE; 1523 if (tcp_rexmit_initial < 1) 1524 tcp_rexmit_initial = 1; 1525 tcp_rexmit_min = TCPTV_MIN; 1526 if (tcp_rexmit_min < 1) 1527 tcp_rexmit_min = 1; 1528 tcp_persmin = TCPTV_PERSMIN; 1529 tcp_persmax = TCPTV_PERSMAX; 1530 tcp_rexmit_slop = TCPTV_CPU_VAR; 1531 tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT; 1532 tcp_tcbhashsize = hashsize; 1533 1534 /* Setup the tcp function block list */ 1535 TAILQ_INIT(&t_functions); 1536 rw_init(&tcp_function_lock, "tcp_func_lock"); 1537 register_tcp_functions(&tcp_def_funcblk, M_WAITOK); 1538 #ifdef TCP_BLACKBOX 1539 /* Initialize the TCP logging data. */ 1540 tcp_log_init(); 1541 #endif 1542 arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0); 1543 1544 if (tcp_soreceive_stream) { 1545 #ifdef INET 1546 tcp_usrreqs.pru_soreceive = soreceive_stream; 1547 #endif 1548 #ifdef INET6 1549 tcp6_usrreqs.pru_soreceive = soreceive_stream; 1550 #endif /* INET6 */ 1551 } 1552 1553 #ifdef INET6 1554 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) 1555 #else /* INET6 */ 1556 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr)) 1557 #endif /* INET6 */ 1558 if (max_protohdr < TCP_MINPROTOHDR) 1559 max_protohdr = TCP_MINPROTOHDR; 1560 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN) 1561 panic("tcp_init"); 1562 #undef TCP_MINPROTOHDR 1563 1564 ISN_LOCK_INIT(); 1565 EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL, 1566 SHUTDOWN_PRI_DEFAULT); 1567 EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL, 1568 EVENTHANDLER_PRI_ANY); 1569 1570 tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK); 1571 tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK); 1572 tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK); 1573 tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK); 1574 tcp_extra_mbuf = counter_u64_alloc(M_WAITOK); 1575 tcp_would_have_but = counter_u64_alloc(M_WAITOK); 1576 tcp_comp_total = counter_u64_alloc(M_WAITOK); 1577 tcp_uncomp_total = counter_u64_alloc(M_WAITOK); 1578 tcp_bad_csums = counter_u64_alloc(M_WAITOK); 1579 #ifdef TCPPCAP 1580 tcp_pcap_init(); 1581 #endif 1582 } 1583 1584 #ifdef VIMAGE 1585 static void 1586 tcp_destroy(void *unused __unused) 1587 { 1588 int n; 1589 #ifdef TCP_HHOOK 1590 int error; 1591 #endif 1592 1593 /* 1594 * All our processes are gone, all our sockets should be cleaned 1595 * up, which means, we should be past the tcp_discardcb() calls. 1596 * Sleep to let all tcpcb timers really disappear and cleanup. 1597 */ 1598 for (;;) { 1599 INP_LIST_RLOCK(&V_tcbinfo); 1600 n = V_tcbinfo.ipi_count; 1601 INP_LIST_RUNLOCK(&V_tcbinfo); 1602 if (n == 0) 1603 break; 1604 pause("tcpdes", hz / 10); 1605 } 1606 tcp_hc_destroy(); 1607 syncache_destroy(); 1608 tcp_tw_destroy(); 1609 in_pcbinfo_destroy(&V_tcbinfo); 1610 /* tcp_discardcb() clears the sack_holes up. */ 1611 uma_zdestroy(V_sack_hole_zone); 1612 uma_zdestroy(V_tcpcb_zone); 1613 1614 /* 1615 * Cannot free the zone until all tcpcbs are released as we attach 1616 * the allocations to them. 1617 */ 1618 tcp_fastopen_destroy(); 1619 1620 #ifdef TCP_HHOOK 1621 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]); 1622 if (error != 0) { 1623 printf("%s: WARNING: unable to deregister helper hook " 1624 "type=%d, id=%d: error %d returned\n", __func__, 1625 HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error); 1626 } 1627 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]); 1628 if (error != 0) { 1629 printf("%s: WARNING: unable to deregister helper hook " 1630 "type=%d, id=%d: error %d returned\n", __func__, 1631 HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error); 1632 } 1633 #endif 1634 } 1635 VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL); 1636 #endif 1637 1638 void 1639 tcp_fini(void *xtp) 1640 { 1641 1642 } 1643 1644 /* 1645 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb. 1646 * tcp_template used to store this data in mbufs, but we now recopy it out 1647 * of the tcpcb each time to conserve mbufs. 1648 */ 1649 void 1650 tcpip_fillheaders(struct inpcb *inp, uint16_t port, void *ip_ptr, void *tcp_ptr) 1651 { 1652 struct tcphdr *th = (struct tcphdr *)tcp_ptr; 1653 1654 INP_WLOCK_ASSERT(inp); 1655 1656 #ifdef INET6 1657 if ((inp->inp_vflag & INP_IPV6) != 0) { 1658 struct ip6_hdr *ip6; 1659 1660 ip6 = (struct ip6_hdr *)ip_ptr; 1661 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 1662 (inp->inp_flow & IPV6_FLOWINFO_MASK); 1663 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) | 1664 (IPV6_VERSION & IPV6_VERSION_MASK); 1665 if (port == 0) 1666 ip6->ip6_nxt = IPPROTO_TCP; 1667 else 1668 ip6->ip6_nxt = IPPROTO_UDP; 1669 ip6->ip6_plen = htons(sizeof(struct tcphdr)); 1670 ip6->ip6_src = inp->in6p_laddr; 1671 ip6->ip6_dst = inp->in6p_faddr; 1672 } 1673 #endif /* INET6 */ 1674 #if defined(INET6) && defined(INET) 1675 else 1676 #endif 1677 #ifdef INET 1678 { 1679 struct ip *ip; 1680 1681 ip = (struct ip *)ip_ptr; 1682 ip->ip_v = IPVERSION; 1683 ip->ip_hl = 5; 1684 ip->ip_tos = inp->inp_ip_tos; 1685 ip->ip_len = 0; 1686 ip->ip_id = 0; 1687 ip->ip_off = 0; 1688 ip->ip_ttl = inp->inp_ip_ttl; 1689 ip->ip_sum = 0; 1690 if (port == 0) 1691 ip->ip_p = IPPROTO_TCP; 1692 else 1693 ip->ip_p = IPPROTO_UDP; 1694 ip->ip_src = inp->inp_laddr; 1695 ip->ip_dst = inp->inp_faddr; 1696 } 1697 #endif /* INET */ 1698 th->th_sport = inp->inp_lport; 1699 th->th_dport = inp->inp_fport; 1700 th->th_seq = 0; 1701 th->th_ack = 0; 1702 th->th_x2 = 0; 1703 th->th_off = 5; 1704 th->th_flags = 0; 1705 th->th_win = 0; 1706 th->th_urp = 0; 1707 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */ 1708 } 1709 1710 /* 1711 * Create template to be used to send tcp packets on a connection. 1712 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only 1713 * use for this function is in keepalives, which use tcp_respond. 1714 */ 1715 struct tcptemp * 1716 tcpip_maketemplate(struct inpcb *inp) 1717 { 1718 struct tcptemp *t; 1719 1720 t = malloc(sizeof(*t), M_TEMP, M_NOWAIT); 1721 if (t == NULL) 1722 return (NULL); 1723 tcpip_fillheaders(inp, 0, (void *)&t->tt_ipgen, (void *)&t->tt_t); 1724 return (t); 1725 } 1726 1727 /* 1728 * Send a single message to the TCP at address specified by 1729 * the given TCP/IP header. If m == NULL, then we make a copy 1730 * of the tcpiphdr at th and send directly to the addressed host. 1731 * This is used to force keep alive messages out using the TCP 1732 * template for a connection. If flags are given then we send 1733 * a message back to the TCP which originated the segment th, 1734 * and discard the mbuf containing it and any other attached mbufs. 1735 * 1736 * In any case the ack and sequence number of the transmitted 1737 * segment are as specified by the parameters. 1738 * 1739 * NOTE: If m != NULL, then th must point to *inside* the mbuf. 1740 */ 1741 void 1742 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, 1743 tcp_seq ack, tcp_seq seq, int flags) 1744 { 1745 struct tcpopt to; 1746 struct inpcb *inp; 1747 struct ip *ip; 1748 struct mbuf *optm; 1749 struct udphdr *uh = NULL; 1750 struct tcphdr *nth; 1751 struct tcp_log_buffer *lgb; 1752 u_char *optp; 1753 #ifdef INET6 1754 struct ip6_hdr *ip6; 1755 int isipv6; 1756 #endif /* INET6 */ 1757 int optlen, tlen, win, ulen; 1758 bool incl_opts; 1759 uint16_t port; 1760 int output_ret; 1761 1762 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL")); 1763 NET_EPOCH_ASSERT(); 1764 1765 #ifdef INET6 1766 isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4); 1767 ip6 = ipgen; 1768 #endif /* INET6 */ 1769 ip = ipgen; 1770 1771 if (tp != NULL) { 1772 inp = tp->t_inpcb; 1773 KASSERT(inp != NULL, ("tcp control block w/o inpcb")); 1774 INP_LOCK_ASSERT(inp); 1775 } else 1776 inp = NULL; 1777 1778 if (m != NULL) { 1779 #ifdef INET6 1780 if (isipv6 && ip6 && (ip6->ip6_nxt == IPPROTO_UDP)) 1781 port = m->m_pkthdr.tcp_tun_port; 1782 else 1783 #endif 1784 if (ip && (ip->ip_p == IPPROTO_UDP)) 1785 port = m->m_pkthdr.tcp_tun_port; 1786 else 1787 port = 0; 1788 } else 1789 port = tp->t_port; 1790 1791 incl_opts = false; 1792 win = 0; 1793 if (tp != NULL) { 1794 if (!(flags & TH_RST)) { 1795 win = sbspace(&inp->inp_socket->so_rcv); 1796 if (win > TCP_MAXWIN << tp->rcv_scale) 1797 win = TCP_MAXWIN << tp->rcv_scale; 1798 } 1799 if ((tp->t_flags & TF_NOOPT) == 0) 1800 incl_opts = true; 1801 } 1802 if (m == NULL) { 1803 m = m_gethdr(M_NOWAIT, MT_DATA); 1804 if (m == NULL) 1805 return; 1806 m->m_data += max_linkhdr; 1807 #ifdef INET6 1808 if (isipv6) { 1809 bcopy((caddr_t)ip6, mtod(m, caddr_t), 1810 sizeof(struct ip6_hdr)); 1811 ip6 = mtod(m, struct ip6_hdr *); 1812 nth = (struct tcphdr *)(ip6 + 1); 1813 if (port) { 1814 /* Insert a UDP header */ 1815 uh = (struct udphdr *)nth; 1816 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1817 uh->uh_dport = port; 1818 nth = (struct tcphdr *)(uh + 1); 1819 } 1820 } else 1821 #endif /* INET6 */ 1822 { 1823 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip)); 1824 ip = mtod(m, struct ip *); 1825 nth = (struct tcphdr *)(ip + 1); 1826 if (port) { 1827 /* Insert a UDP header */ 1828 uh = (struct udphdr *)nth; 1829 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1830 uh->uh_dport = port; 1831 nth = (struct tcphdr *)(uh + 1); 1832 } 1833 } 1834 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr)); 1835 flags = TH_ACK; 1836 } else if ((!M_WRITABLE(m)) || (port != 0)) { 1837 struct mbuf *n; 1838 1839 /* Can't reuse 'm', allocate a new mbuf. */ 1840 n = m_gethdr(M_NOWAIT, MT_DATA); 1841 if (n == NULL) { 1842 m_freem(m); 1843 return; 1844 } 1845 1846 if (!m_dup_pkthdr(n, m, M_NOWAIT)) { 1847 m_freem(m); 1848 m_freem(n); 1849 return; 1850 } 1851 1852 n->m_data += max_linkhdr; 1853 /* m_len is set later */ 1854 #define xchg(a,b,type) { type t; t=a; a=b; b=t; } 1855 #ifdef INET6 1856 if (isipv6) { 1857 bcopy((caddr_t)ip6, mtod(n, caddr_t), 1858 sizeof(struct ip6_hdr)); 1859 ip6 = mtod(n, struct ip6_hdr *); 1860 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 1861 nth = (struct tcphdr *)(ip6 + 1); 1862 if (port) { 1863 /* Insert a UDP header */ 1864 uh = (struct udphdr *)nth; 1865 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1866 uh->uh_dport = port; 1867 nth = (struct tcphdr *)(uh + 1); 1868 } 1869 } else 1870 #endif /* INET6 */ 1871 { 1872 bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip)); 1873 ip = mtod(n, struct ip *); 1874 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t); 1875 nth = (struct tcphdr *)(ip + 1); 1876 if (port) { 1877 /* Insert a UDP header */ 1878 uh = (struct udphdr *)nth; 1879 uh->uh_sport = htons(V_tcp_udp_tunneling_port); 1880 uh->uh_dport = port; 1881 nth = (struct tcphdr *)(uh + 1); 1882 } 1883 } 1884 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr)); 1885 xchg(nth->th_dport, nth->th_sport, uint16_t); 1886 th = nth; 1887 m_freem(m); 1888 m = n; 1889 } else { 1890 /* 1891 * reuse the mbuf. 1892 * XXX MRT We inherit the FIB, which is lucky. 1893 */ 1894 m_freem(m->m_next); 1895 m->m_next = NULL; 1896 m->m_data = (caddr_t)ipgen; 1897 /* m_len is set later */ 1898 #ifdef INET6 1899 if (isipv6) { 1900 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 1901 nth = (struct tcphdr *)(ip6 + 1); 1902 } else 1903 #endif /* INET6 */ 1904 { 1905 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t); 1906 nth = (struct tcphdr *)(ip + 1); 1907 } 1908 if (th != nth) { 1909 /* 1910 * this is usually a case when an extension header 1911 * exists between the IPv6 header and the 1912 * TCP header. 1913 */ 1914 nth->th_sport = th->th_sport; 1915 nth->th_dport = th->th_dport; 1916 } 1917 xchg(nth->th_dport, nth->th_sport, uint16_t); 1918 #undef xchg 1919 } 1920 tlen = 0; 1921 #ifdef INET6 1922 if (isipv6) 1923 tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr); 1924 #endif 1925 #if defined(INET) && defined(INET6) 1926 else 1927 #endif 1928 #ifdef INET 1929 tlen = sizeof (struct tcpiphdr); 1930 #endif 1931 if (port) 1932 tlen += sizeof (struct udphdr); 1933 #ifdef INVARIANTS 1934 m->m_len = 0; 1935 KASSERT(M_TRAILINGSPACE(m) >= tlen, 1936 ("Not enough trailing space for message (m=%p, need=%d, have=%ld)", 1937 m, tlen, (long)M_TRAILINGSPACE(m))); 1938 #endif 1939 m->m_len = tlen; 1940 to.to_flags = 0; 1941 if (incl_opts) { 1942 /* Make sure we have room. */ 1943 if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) { 1944 m->m_next = m_get(M_NOWAIT, MT_DATA); 1945 if (m->m_next) { 1946 optp = mtod(m->m_next, u_char *); 1947 optm = m->m_next; 1948 } else 1949 incl_opts = false; 1950 } else { 1951 optp = (u_char *) (nth + 1); 1952 optm = m; 1953 } 1954 } 1955 if (incl_opts) { 1956 /* Timestamps. */ 1957 if (tp->t_flags & TF_RCVD_TSTMP) { 1958 to.to_tsval = tcp_ts_getticks() + tp->ts_offset; 1959 to.to_tsecr = tp->ts_recent; 1960 to.to_flags |= TOF_TS; 1961 } 1962 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 1963 /* TCP-MD5 (RFC2385). */ 1964 if (tp->t_flags & TF_SIGNATURE) 1965 to.to_flags |= TOF_SIGNATURE; 1966 #endif 1967 /* Add the options. */ 1968 tlen += optlen = tcp_addoptions(&to, optp); 1969 1970 /* Update m_len in the correct mbuf. */ 1971 optm->m_len += optlen; 1972 } else 1973 optlen = 0; 1974 #ifdef INET6 1975 if (isipv6) { 1976 if (uh) { 1977 ulen = tlen - sizeof(struct ip6_hdr); 1978 uh->uh_ulen = htons(ulen); 1979 } 1980 ip6->ip6_flow = 0; 1981 ip6->ip6_vfc = IPV6_VERSION; 1982 if (port) 1983 ip6->ip6_nxt = IPPROTO_UDP; 1984 else 1985 ip6->ip6_nxt = IPPROTO_TCP; 1986 ip6->ip6_plen = htons(tlen - sizeof(*ip6)); 1987 } 1988 #endif 1989 #if defined(INET) && defined(INET6) 1990 else 1991 #endif 1992 #ifdef INET 1993 { 1994 if (uh) { 1995 ulen = tlen - sizeof(struct ip); 1996 uh->uh_ulen = htons(ulen); 1997 } 1998 ip->ip_len = htons(tlen); 1999 ip->ip_ttl = V_ip_defttl; 2000 if (port) { 2001 ip->ip_p = IPPROTO_UDP; 2002 } else { 2003 ip->ip_p = IPPROTO_TCP; 2004 } 2005 if (V_path_mtu_discovery) 2006 ip->ip_off |= htons(IP_DF); 2007 } 2008 #endif 2009 m->m_pkthdr.len = tlen; 2010 m->m_pkthdr.rcvif = NULL; 2011 #ifdef MAC 2012 if (inp != NULL) { 2013 /* 2014 * Packet is associated with a socket, so allow the 2015 * label of the response to reflect the socket label. 2016 */ 2017 INP_LOCK_ASSERT(inp); 2018 mac_inpcb_create_mbuf(inp, m); 2019 } else { 2020 /* 2021 * Packet is not associated with a socket, so possibly 2022 * update the label in place. 2023 */ 2024 mac_netinet_tcp_reply(m); 2025 } 2026 #endif 2027 nth->th_seq = htonl(seq); 2028 nth->th_ack = htonl(ack); 2029 nth->th_x2 = 0; 2030 nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2; 2031 nth->th_flags = flags; 2032 if (tp != NULL) 2033 nth->th_win = htons((u_short) (win >> tp->rcv_scale)); 2034 else 2035 nth->th_win = htons((u_short)win); 2036 nth->th_urp = 0; 2037 2038 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 2039 if (to.to_flags & TOF_SIGNATURE) { 2040 if (!TCPMD5_ENABLED() || 2041 TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) { 2042 m_freem(m); 2043 return; 2044 } 2045 } 2046 #endif 2047 2048 #ifdef INET6 2049 if (isipv6) { 2050 if (port) { 2051 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 2052 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 2053 uh->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 2054 nth->th_sum = 0; 2055 } else { 2056 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 2057 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 2058 nth->th_sum = in6_cksum_pseudo(ip6, 2059 tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0); 2060 } 2061 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb : 2062 NULL, NULL); 2063 } 2064 #endif /* INET6 */ 2065 #if defined(INET6) && defined(INET) 2066 else 2067 #endif 2068 #ifdef INET 2069 { 2070 if (port) { 2071 uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 2072 htons(ulen + IPPROTO_UDP)); 2073 m->m_pkthdr.csum_flags = CSUM_UDP; 2074 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 2075 nth->th_sum = 0; 2076 } else { 2077 m->m_pkthdr.csum_flags = CSUM_TCP; 2078 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 2079 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 2080 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); 2081 } 2082 } 2083 #endif /* INET */ 2084 #ifdef TCPDEBUG 2085 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG)) 2086 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0); 2087 #endif 2088 TCP_PROBE3(debug__output, tp, th, m); 2089 if (flags & TH_RST) 2090 TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth); 2091 if ((tp != NULL) && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2092 union tcp_log_stackspecific log; 2093 struct timeval tv; 2094 2095 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2096 log.u_bbr.inhpts = tp->t_inpcb->inp_in_hpts; 2097 log.u_bbr.ininput = tp->t_inpcb->inp_in_input; 2098 log.u_bbr.flex8 = 4; 2099 log.u_bbr.pkts_out = tp->t_maxseg; 2100 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2101 log.u_bbr.delivered = 0; 2102 lgb = tcp_log_event_(tp, nth, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 2103 0, &log, false, NULL, NULL, 0, &tv); 2104 } else 2105 lgb = NULL; 2106 2107 #ifdef INET6 2108 if (isipv6) { 2109 TCP_PROBE5(send, NULL, tp, ip6, tp, nth); 2110 output_ret = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp); 2111 } 2112 #endif /* INET6 */ 2113 #if defined(INET) && defined(INET6) 2114 else 2115 #endif 2116 #ifdef INET 2117 { 2118 TCP_PROBE5(send, NULL, tp, ip, tp, nth); 2119 output_ret = ip_output(m, NULL, NULL, 0, NULL, inp); 2120 } 2121 #endif 2122 if (lgb) { 2123 lgb->tlb_errno = output_ret; 2124 lgb = NULL; 2125 } 2126 } 2127 2128 /* 2129 * Create a new TCP control block, making an 2130 * empty reassembly queue and hooking it to the argument 2131 * protocol control block. The `inp' parameter must have 2132 * come from the zone allocator set up in tcp_init(). 2133 */ 2134 struct tcpcb * 2135 tcp_newtcpcb(struct inpcb *inp) 2136 { 2137 struct tcpcb_mem *tm; 2138 struct tcpcb *tp; 2139 #ifdef INET6 2140 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 2141 #endif /* INET6 */ 2142 2143 tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO); 2144 if (tm == NULL) 2145 return (NULL); 2146 tp = &tm->tcb; 2147 2148 /* Initialise cc_var struct for this tcpcb. */ 2149 tp->ccv = &tm->ccv; 2150 tp->ccv->type = IPPROTO_TCP; 2151 tp->ccv->ccvc.tcp = tp; 2152 rw_rlock(&tcp_function_lock); 2153 tp->t_fb = tcp_func_set_ptr; 2154 refcount_acquire(&tp->t_fb->tfb_refcnt); 2155 rw_runlock(&tcp_function_lock); 2156 /* 2157 * Use the current system default CC algorithm. 2158 */ 2159 CC_LIST_RLOCK(); 2160 KASSERT(!STAILQ_EMPTY(&cc_list), ("cc_list is empty!")); 2161 CC_ALGO(tp) = CC_DEFAULT_ALGO(); 2162 CC_LIST_RUNLOCK(); 2163 2164 /* 2165 * The tcpcb will hold a reference on its inpcb until tcp_discardcb() 2166 * is called. 2167 */ 2168 in_pcbref(inp); /* Reference for tcpcb */ 2169 tp->t_inpcb = inp; 2170 2171 if (CC_ALGO(tp)->cb_init != NULL) 2172 if (CC_ALGO(tp)->cb_init(tp->ccv, NULL) > 0) { 2173 if (tp->t_fb->tfb_tcp_fb_fini) 2174 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2175 in_pcbrele_wlocked(inp); 2176 refcount_release(&tp->t_fb->tfb_refcnt); 2177 uma_zfree(V_tcpcb_zone, tm); 2178 return (NULL); 2179 } 2180 2181 #ifdef TCP_HHOOK 2182 tp->osd = &tm->osd; 2183 if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) { 2184 if (tp->t_fb->tfb_tcp_fb_fini) 2185 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2186 in_pcbrele_wlocked(inp); 2187 refcount_release(&tp->t_fb->tfb_refcnt); 2188 uma_zfree(V_tcpcb_zone, tm); 2189 return (NULL); 2190 } 2191 #endif 2192 2193 #ifdef VIMAGE 2194 tp->t_vnet = inp->inp_vnet; 2195 #endif 2196 tp->t_timers = &tm->tt; 2197 TAILQ_INIT(&tp->t_segq); 2198 tp->t_maxseg = 2199 #ifdef INET6 2200 isipv6 ? V_tcp_v6mssdflt : 2201 #endif /* INET6 */ 2202 V_tcp_mssdflt; 2203 2204 /* Set up our timeouts. */ 2205 callout_init(&tp->t_timers->tt_rexmt, 1); 2206 callout_init(&tp->t_timers->tt_persist, 1); 2207 callout_init(&tp->t_timers->tt_keep, 1); 2208 callout_init(&tp->t_timers->tt_2msl, 1); 2209 callout_init(&tp->t_timers->tt_delack, 1); 2210 2211 if (V_tcp_do_rfc1323) 2212 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP); 2213 if (V_tcp_do_sack) 2214 tp->t_flags |= TF_SACK_PERMIT; 2215 TAILQ_INIT(&tp->snd_holes); 2216 2217 /* 2218 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no 2219 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives 2220 * reasonable initial retransmit time. 2221 */ 2222 tp->t_srtt = TCPTV_SRTTBASE; 2223 tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4; 2224 tp->t_rttmin = tcp_rexmit_min; 2225 tp->t_rxtcur = tcp_rexmit_initial; 2226 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 2227 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; 2228 tp->t_rcvtime = ticks; 2229 /* 2230 * IPv4 TTL initialization is necessary for an IPv6 socket as well, 2231 * because the socket may be bound to an IPv6 wildcard address, 2232 * which may match an IPv4-mapped IPv6 address. 2233 */ 2234 inp->inp_ip_ttl = V_ip_defttl; 2235 inp->inp_ppcb = tp; 2236 #ifdef TCPPCAP 2237 /* 2238 * Init the TCP PCAP queues. 2239 */ 2240 tcp_pcap_tcpcb_init(tp); 2241 #endif 2242 #ifdef TCP_BLACKBOX 2243 /* Initialize the per-TCPCB log data. */ 2244 tcp_log_tcpcbinit(tp); 2245 #endif 2246 tp->t_pacing_rate = -1; 2247 if (tp->t_fb->tfb_tcp_fb_init) { 2248 if ((*tp->t_fb->tfb_tcp_fb_init)(tp)) { 2249 refcount_release(&tp->t_fb->tfb_refcnt); 2250 in_pcbrele_wlocked(inp); 2251 uma_zfree(V_tcpcb_zone, tm); 2252 return (NULL); 2253 } 2254 } 2255 #ifdef STATS 2256 if (V_tcp_perconn_stats_enable == 1) 2257 tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0); 2258 #endif 2259 if (V_tcp_do_lrd) 2260 tp->t_flags |= TF_LRD; 2261 return (tp); /* XXX */ 2262 } 2263 2264 /* 2265 * Switch the congestion control algorithm back to Vnet default for any active 2266 * control blocks using an algorithm which is about to go away. If the algorithm 2267 * has a cb_init function and it fails (no memory) then the operation fails and 2268 * the unload will not succeed. 2269 * 2270 */ 2271 int 2272 tcp_ccalgounload(struct cc_algo *unload_algo) 2273 { 2274 struct cc_algo *oldalgo, *newalgo; 2275 struct inpcb *inp; 2276 struct tcpcb *tp; 2277 VNET_ITERATOR_DECL(vnet_iter); 2278 2279 /* 2280 * Check all active control blocks across all network stacks and change 2281 * any that are using "unload_algo" back to its default. If "unload_algo" 2282 * requires cleanup code to be run, call it. 2283 */ 2284 VNET_LIST_RLOCK(); 2285 VNET_FOREACH(vnet_iter) { 2286 CURVNET_SET(vnet_iter); 2287 INP_INFO_WLOCK(&V_tcbinfo); 2288 /* 2289 * New connections already part way through being initialised 2290 * with the CC algo we're removing will not race with this code 2291 * because the INP_INFO_WLOCK is held during initialisation. We 2292 * therefore don't enter the loop below until the connection 2293 * list has stabilised. 2294 */ 2295 newalgo = CC_DEFAULT_ALGO(); 2296 CK_LIST_FOREACH(inp, &V_tcb, inp_list) { 2297 INP_WLOCK(inp); 2298 /* Important to skip tcptw structs. */ 2299 if (!(inp->inp_flags & INP_TIMEWAIT) && 2300 (tp = intotcpcb(inp)) != NULL) { 2301 /* 2302 * By holding INP_WLOCK here, we are assured 2303 * that the connection is not currently 2304 * executing inside the CC module's functions. 2305 * We attempt to switch to the Vnets default, 2306 * if the init fails then we fail the whole 2307 * operation and the module unload will fail. 2308 */ 2309 if (CC_ALGO(tp) == unload_algo) { 2310 struct cc_var cc_mem; 2311 int err; 2312 2313 oldalgo = CC_ALGO(tp); 2314 memset(&cc_mem, 0, sizeof(cc_mem)); 2315 cc_mem.ccvc.tcp = tp; 2316 if (newalgo->cb_init == NULL) { 2317 /* 2318 * No init we can skip the 2319 * dance around a possible failure. 2320 */ 2321 CC_DATA(tp) = NULL; 2322 goto proceed; 2323 } 2324 err = (newalgo->cb_init)(&cc_mem, NULL); 2325 if (err) { 2326 /* 2327 * Presumably no memory the caller will 2328 * need to try again. 2329 */ 2330 INP_WUNLOCK(inp); 2331 INP_INFO_WUNLOCK(&V_tcbinfo); 2332 CURVNET_RESTORE(); 2333 VNET_LIST_RUNLOCK(); 2334 return (err); 2335 } 2336 proceed: 2337 if (oldalgo->cb_destroy != NULL) 2338 oldalgo->cb_destroy(tp->ccv); 2339 CC_ALGO(tp) = newalgo; 2340 memcpy(tp->ccv, &cc_mem, sizeof(struct cc_var)); 2341 if (TCPS_HAVEESTABLISHED(tp->t_state) && 2342 (CC_ALGO(tp)->conn_init != NULL)) { 2343 /* Yep run the connection init for the new CC */ 2344 CC_ALGO(tp)->conn_init(tp->ccv); 2345 } 2346 } 2347 } 2348 INP_WUNLOCK(inp); 2349 } 2350 INP_INFO_WUNLOCK(&V_tcbinfo); 2351 CURVNET_RESTORE(); 2352 } 2353 VNET_LIST_RUNLOCK(); 2354 return (0); 2355 } 2356 2357 /* 2358 * Drop a TCP connection, reporting 2359 * the specified error. If connection is synchronized, 2360 * then send a RST to peer. 2361 */ 2362 struct tcpcb * 2363 tcp_drop(struct tcpcb *tp, int errno) 2364 { 2365 struct socket *so = tp->t_inpcb->inp_socket; 2366 2367 NET_EPOCH_ASSERT(); 2368 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2369 INP_WLOCK_ASSERT(tp->t_inpcb); 2370 2371 if (TCPS_HAVERCVDSYN(tp->t_state)) { 2372 tcp_state_change(tp, TCPS_CLOSED); 2373 (void) tp->t_fb->tfb_tcp_output(tp); 2374 TCPSTAT_INC(tcps_drops); 2375 } else 2376 TCPSTAT_INC(tcps_conndrops); 2377 if (errno == ETIMEDOUT && tp->t_softerror) 2378 errno = tp->t_softerror; 2379 so->so_error = errno; 2380 return (tcp_close(tp)); 2381 } 2382 2383 void 2384 tcp_discardcb(struct tcpcb *tp) 2385 { 2386 struct inpcb *inp = tp->t_inpcb; 2387 struct socket *so = inp->inp_socket; 2388 #ifdef INET6 2389 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 2390 #endif /* INET6 */ 2391 int released __unused; 2392 2393 INP_WLOCK_ASSERT(inp); 2394 2395 /* 2396 * Make sure that all of our timers are stopped before we delete the 2397 * PCB. 2398 * 2399 * If stopping a timer fails, we schedule a discard function in same 2400 * callout, and the last discard function called will take care of 2401 * deleting the tcpcb. 2402 */ 2403 tp->t_timers->tt_draincnt = 0; 2404 tcp_timer_stop(tp, TT_REXMT); 2405 tcp_timer_stop(tp, TT_PERSIST); 2406 tcp_timer_stop(tp, TT_KEEP); 2407 tcp_timer_stop(tp, TT_2MSL); 2408 tcp_timer_stop(tp, TT_DELACK); 2409 if (tp->t_fb->tfb_tcp_timer_stop_all) { 2410 /* 2411 * Call the stop-all function of the methods, 2412 * this function should call the tcp_timer_stop() 2413 * method with each of the function specific timeouts. 2414 * That stop will be called via the tfb_tcp_timer_stop() 2415 * which should use the async drain function of the 2416 * callout system (see tcp_var.h). 2417 */ 2418 tp->t_fb->tfb_tcp_timer_stop_all(tp); 2419 } 2420 2421 /* free the reassembly queue, if any */ 2422 tcp_reass_flush(tp); 2423 2424 #ifdef TCP_OFFLOAD 2425 /* Disconnect offload device, if any. */ 2426 if (tp->t_flags & TF_TOE) 2427 tcp_offload_detach(tp); 2428 #endif 2429 2430 tcp_free_sackholes(tp); 2431 2432 #ifdef TCPPCAP 2433 /* Free the TCP PCAP queues. */ 2434 tcp_pcap_drain(&(tp->t_inpkts)); 2435 tcp_pcap_drain(&(tp->t_outpkts)); 2436 #endif 2437 2438 /* Allow the CC algorithm to clean up after itself. */ 2439 if (CC_ALGO(tp)->cb_destroy != NULL) 2440 CC_ALGO(tp)->cb_destroy(tp->ccv); 2441 CC_DATA(tp) = NULL; 2442 2443 #ifdef TCP_HHOOK 2444 khelp_destroy_osd(tp->osd); 2445 #endif 2446 #ifdef STATS 2447 stats_blob_destroy(tp->t_stats); 2448 #endif 2449 2450 CC_ALGO(tp) = NULL; 2451 inp->inp_ppcb = NULL; 2452 if (tp->t_timers->tt_draincnt == 0) { 2453 /* We own the last reference on tcpcb, let's free it. */ 2454 #ifdef TCP_BLACKBOX 2455 tcp_log_tcpcbfini(tp); 2456 #endif 2457 TCPSTATES_DEC(tp->t_state); 2458 if (tp->t_fb->tfb_tcp_fb_fini) 2459 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2460 2461 /* 2462 * If we got enough samples through the srtt filter, 2463 * save the rtt and rttvar in the routing entry. 2464 * 'Enough' is arbitrarily defined as 4 rtt samples. 2465 * 4 samples is enough for the srtt filter to converge 2466 * to within enough % of the correct value; fewer samples 2467 * and we could save a bogus rtt. The danger is not high 2468 * as tcp quickly recovers from everything. 2469 * XXX: Works very well but needs some more statistics! 2470 * 2471 * XXXRRS: Updating must be after the stack fini() since 2472 * that may be converting some internal representation of 2473 * say srtt etc into the general one used by other stacks. 2474 * Lets also at least protect against the so being NULL 2475 * as RW stated below. 2476 */ 2477 if ((tp->t_rttupdated >= 4) && (so != NULL)) { 2478 struct hc_metrics_lite metrics; 2479 uint32_t ssthresh; 2480 2481 bzero(&metrics, sizeof(metrics)); 2482 /* 2483 * Update the ssthresh always when the conditions below 2484 * are satisfied. This gives us better new start value 2485 * for the congestion avoidance for new connections. 2486 * ssthresh is only set if packet loss occurred on a session. 2487 * 2488 * XXXRW: 'so' may be NULL here, and/or socket buffer may be 2489 * being torn down. Ideally this code would not use 'so'. 2490 */ 2491 ssthresh = tp->snd_ssthresh; 2492 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) { 2493 /* 2494 * convert the limit from user data bytes to 2495 * packets then to packet data bytes. 2496 */ 2497 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg; 2498 if (ssthresh < 2) 2499 ssthresh = 2; 2500 ssthresh *= (tp->t_maxseg + 2501 #ifdef INET6 2502 (isipv6 ? sizeof (struct ip6_hdr) + 2503 sizeof (struct tcphdr) : 2504 #endif 2505 sizeof (struct tcpiphdr) 2506 #ifdef INET6 2507 ) 2508 #endif 2509 ); 2510 } else 2511 ssthresh = 0; 2512 metrics.rmx_ssthresh = ssthresh; 2513 2514 metrics.rmx_rtt = tp->t_srtt; 2515 metrics.rmx_rttvar = tp->t_rttvar; 2516 metrics.rmx_cwnd = tp->snd_cwnd; 2517 metrics.rmx_sendpipe = 0; 2518 metrics.rmx_recvpipe = 0; 2519 2520 tcp_hc_update(&inp->inp_inc, &metrics); 2521 } 2522 refcount_release(&tp->t_fb->tfb_refcnt); 2523 tp->t_inpcb = NULL; 2524 uma_zfree(V_tcpcb_zone, tp); 2525 released = in_pcbrele_wlocked(inp); 2526 KASSERT(!released, ("%s: inp %p should not have been released " 2527 "here", __func__, inp)); 2528 } 2529 } 2530 2531 void 2532 tcp_timer_discard(void *ptp) 2533 { 2534 struct inpcb *inp; 2535 struct tcpcb *tp; 2536 struct epoch_tracker et; 2537 2538 tp = (struct tcpcb *)ptp; 2539 CURVNET_SET(tp->t_vnet); 2540 NET_EPOCH_ENTER(et); 2541 inp = tp->t_inpcb; 2542 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", 2543 __func__, tp)); 2544 INP_WLOCK(inp); 2545 KASSERT((tp->t_timers->tt_flags & TT_STOPPED) != 0, 2546 ("%s: tcpcb has to be stopped here", __func__)); 2547 tp->t_timers->tt_draincnt--; 2548 if (tp->t_timers->tt_draincnt == 0) { 2549 /* We own the last reference on this tcpcb, let's free it. */ 2550 #ifdef TCP_BLACKBOX 2551 tcp_log_tcpcbfini(tp); 2552 #endif 2553 TCPSTATES_DEC(tp->t_state); 2554 if (tp->t_fb->tfb_tcp_fb_fini) 2555 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1); 2556 refcount_release(&tp->t_fb->tfb_refcnt); 2557 tp->t_inpcb = NULL; 2558 uma_zfree(V_tcpcb_zone, tp); 2559 if (in_pcbrele_wlocked(inp)) { 2560 NET_EPOCH_EXIT(et); 2561 CURVNET_RESTORE(); 2562 return; 2563 } 2564 } 2565 INP_WUNLOCK(inp); 2566 NET_EPOCH_EXIT(et); 2567 CURVNET_RESTORE(); 2568 } 2569 2570 /* 2571 * Attempt to close a TCP control block, marking it as dropped, and freeing 2572 * the socket if we hold the only reference. 2573 */ 2574 struct tcpcb * 2575 tcp_close(struct tcpcb *tp) 2576 { 2577 struct inpcb *inp = tp->t_inpcb; 2578 struct socket *so; 2579 2580 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2581 INP_WLOCK_ASSERT(inp); 2582 2583 #ifdef TCP_OFFLOAD 2584 if (tp->t_state == TCPS_LISTEN) 2585 tcp_offload_listen_stop(tp); 2586 #endif 2587 /* 2588 * This releases the TFO pending counter resource for TFO listen 2589 * sockets as well as passively-created TFO sockets that transition 2590 * from SYN_RECEIVED to CLOSED. 2591 */ 2592 if (tp->t_tfo_pending) { 2593 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2594 tp->t_tfo_pending = NULL; 2595 } 2596 in_pcbdrop(inp); 2597 TCPSTAT_INC(tcps_closed); 2598 if (tp->t_state != TCPS_CLOSED) 2599 tcp_state_change(tp, TCPS_CLOSED); 2600 KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL")); 2601 so = inp->inp_socket; 2602 soisdisconnected(so); 2603 if (inp->inp_flags & INP_SOCKREF) { 2604 KASSERT(so->so_state & SS_PROTOREF, 2605 ("tcp_close: !SS_PROTOREF")); 2606 inp->inp_flags &= ~INP_SOCKREF; 2607 INP_WUNLOCK(inp); 2608 SOCK_LOCK(so); 2609 so->so_state &= ~SS_PROTOREF; 2610 sofree(so); 2611 return (NULL); 2612 } 2613 return (tp); 2614 } 2615 2616 void 2617 tcp_drain(void) 2618 { 2619 VNET_ITERATOR_DECL(vnet_iter); 2620 2621 if (!do_tcpdrain) 2622 return; 2623 2624 VNET_LIST_RLOCK_NOSLEEP(); 2625 VNET_FOREACH(vnet_iter) { 2626 CURVNET_SET(vnet_iter); 2627 struct inpcb *inpb; 2628 struct tcpcb *tcpb; 2629 2630 /* 2631 * Walk the tcpbs, if existing, and flush the reassembly queue, 2632 * if there is one... 2633 * XXX: The "Net/3" implementation doesn't imply that the TCP 2634 * reassembly queue should be flushed, but in a situation 2635 * where we're really low on mbufs, this is potentially 2636 * useful. 2637 */ 2638 INP_INFO_WLOCK(&V_tcbinfo); 2639 CK_LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) { 2640 INP_WLOCK(inpb); 2641 if (inpb->inp_flags & INP_TIMEWAIT) { 2642 INP_WUNLOCK(inpb); 2643 continue; 2644 } 2645 if ((tcpb = intotcpcb(inpb)) != NULL) { 2646 tcp_reass_flush(tcpb); 2647 tcp_clean_sackreport(tcpb); 2648 #ifdef TCP_BLACKBOX 2649 tcp_log_drain(tcpb); 2650 #endif 2651 #ifdef TCPPCAP 2652 if (tcp_pcap_aggressive_free) { 2653 /* Free the TCP PCAP queues. */ 2654 tcp_pcap_drain(&(tcpb->t_inpkts)); 2655 tcp_pcap_drain(&(tcpb->t_outpkts)); 2656 } 2657 #endif 2658 } 2659 INP_WUNLOCK(inpb); 2660 } 2661 INP_INFO_WUNLOCK(&V_tcbinfo); 2662 CURVNET_RESTORE(); 2663 } 2664 VNET_LIST_RUNLOCK_NOSLEEP(); 2665 } 2666 2667 /* 2668 * Notify a tcp user of an asynchronous error; 2669 * store error as soft error, but wake up user 2670 * (for now, won't do anything until can select for soft error). 2671 * 2672 * Do not wake up user since there currently is no mechanism for 2673 * reporting soft errors (yet - a kqueue filter may be added). 2674 */ 2675 static struct inpcb * 2676 tcp_notify(struct inpcb *inp, int error) 2677 { 2678 struct tcpcb *tp; 2679 2680 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2681 INP_WLOCK_ASSERT(inp); 2682 2683 if ((inp->inp_flags & INP_TIMEWAIT) || 2684 (inp->inp_flags & INP_DROPPED)) 2685 return (inp); 2686 2687 tp = intotcpcb(inp); 2688 KASSERT(tp != NULL, ("tcp_notify: tp == NULL")); 2689 2690 /* 2691 * Ignore some errors if we are hooked up. 2692 * If connection hasn't completed, has retransmitted several times, 2693 * and receives a second error, give up now. This is better 2694 * than waiting a long time to establish a connection that 2695 * can never complete. 2696 */ 2697 if (tp->t_state == TCPS_ESTABLISHED && 2698 (error == EHOSTUNREACH || error == ENETUNREACH || 2699 error == EHOSTDOWN)) { 2700 if (inp->inp_route.ro_nh) { 2701 NH_FREE(inp->inp_route.ro_nh); 2702 inp->inp_route.ro_nh = (struct nhop_object *)NULL; 2703 } 2704 return (inp); 2705 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 && 2706 tp->t_softerror) { 2707 tp = tcp_drop(tp, error); 2708 if (tp != NULL) 2709 return (inp); 2710 else 2711 return (NULL); 2712 } else { 2713 tp->t_softerror = error; 2714 return (inp); 2715 } 2716 #if 0 2717 wakeup( &so->so_timeo); 2718 sorwakeup(so); 2719 sowwakeup(so); 2720 #endif 2721 } 2722 2723 static int 2724 tcp_pcblist(SYSCTL_HANDLER_ARGS) 2725 { 2726 struct epoch_tracker et; 2727 struct inpcb *inp; 2728 struct xinpgen xig; 2729 int error; 2730 2731 if (req->newptr != NULL) 2732 return (EPERM); 2733 2734 if (req->oldptr == NULL) { 2735 int n; 2736 2737 n = V_tcbinfo.ipi_count + 2738 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2739 n += imax(n / 8, 10); 2740 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb); 2741 return (0); 2742 } 2743 2744 if ((error = sysctl_wire_old_buffer(req, 0)) != 0) 2745 return (error); 2746 2747 bzero(&xig, sizeof(xig)); 2748 xig.xig_len = sizeof xig; 2749 xig.xig_count = V_tcbinfo.ipi_count + 2750 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2751 xig.xig_gen = V_tcbinfo.ipi_gencnt; 2752 xig.xig_sogen = so_gencnt; 2753 error = SYSCTL_OUT(req, &xig, sizeof xig); 2754 if (error) 2755 return (error); 2756 2757 error = syncache_pcblist(req); 2758 if (error) 2759 return (error); 2760 2761 NET_EPOCH_ENTER(et); 2762 for (inp = CK_LIST_FIRST(V_tcbinfo.ipi_listhead); 2763 inp != NULL; 2764 inp = CK_LIST_NEXT(inp, inp_list)) { 2765 INP_RLOCK(inp); 2766 if (inp->inp_gencnt <= xig.xig_gen) { 2767 int crerr; 2768 2769 /* 2770 * XXX: This use of cr_cansee(), introduced with 2771 * TCP state changes, is not quite right, but for 2772 * now, better than nothing. 2773 */ 2774 if (inp->inp_flags & INP_TIMEWAIT) { 2775 if (intotw(inp) != NULL) 2776 crerr = cr_cansee(req->td->td_ucred, 2777 intotw(inp)->tw_cred); 2778 else 2779 crerr = EINVAL; /* Skip this inp. */ 2780 } else 2781 crerr = cr_canseeinpcb(req->td->td_ucred, inp); 2782 if (crerr == 0) { 2783 struct xtcpcb xt; 2784 2785 tcp_inptoxtp(inp, &xt); 2786 INP_RUNLOCK(inp); 2787 error = SYSCTL_OUT(req, &xt, sizeof xt); 2788 if (error) 2789 break; 2790 else 2791 continue; 2792 } 2793 } 2794 INP_RUNLOCK(inp); 2795 } 2796 NET_EPOCH_EXIT(et); 2797 2798 if (!error) { 2799 /* 2800 * Give the user an updated idea of our state. 2801 * If the generation differs from what we told 2802 * her before, she knows that something happened 2803 * while we were processing this request, and it 2804 * might be necessary to retry. 2805 */ 2806 xig.xig_gen = V_tcbinfo.ipi_gencnt; 2807 xig.xig_sogen = so_gencnt; 2808 xig.xig_count = V_tcbinfo.ipi_count + 2809 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]); 2810 error = SYSCTL_OUT(req, &xig, sizeof xig); 2811 } 2812 2813 return (error); 2814 } 2815 2816 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, 2817 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2818 NULL, 0, tcp_pcblist, "S,xtcpcb", 2819 "List of active TCP connections"); 2820 2821 #ifdef INET 2822 static int 2823 tcp_getcred(SYSCTL_HANDLER_ARGS) 2824 { 2825 struct xucred xuc; 2826 struct sockaddr_in addrs[2]; 2827 struct epoch_tracker et; 2828 struct inpcb *inp; 2829 int error; 2830 2831 error = priv_check(req->td, PRIV_NETINET_GETCRED); 2832 if (error) 2833 return (error); 2834 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 2835 if (error) 2836 return (error); 2837 NET_EPOCH_ENTER(et); 2838 inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port, 2839 addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL); 2840 NET_EPOCH_EXIT(et); 2841 if (inp != NULL) { 2842 if (inp->inp_socket == NULL) 2843 error = ENOENT; 2844 if (error == 0) 2845 error = cr_canseeinpcb(req->td->td_ucred, inp); 2846 if (error == 0) 2847 cru2x(inp->inp_cred, &xuc); 2848 INP_RUNLOCK(inp); 2849 } else 2850 error = ENOENT; 2851 if (error == 0) 2852 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 2853 return (error); 2854 } 2855 2856 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, 2857 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT, 2858 0, 0, tcp_getcred, "S,xucred", 2859 "Get the xucred of a TCP connection"); 2860 #endif /* INET */ 2861 2862 #ifdef INET6 2863 static int 2864 tcp6_getcred(SYSCTL_HANDLER_ARGS) 2865 { 2866 struct epoch_tracker et; 2867 struct xucred xuc; 2868 struct sockaddr_in6 addrs[2]; 2869 struct inpcb *inp; 2870 int error; 2871 #ifdef INET 2872 int mapped = 0; 2873 #endif 2874 2875 error = priv_check(req->td, PRIV_NETINET_GETCRED); 2876 if (error) 2877 return (error); 2878 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 2879 if (error) 2880 return (error); 2881 if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 || 2882 (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) { 2883 return (error); 2884 } 2885 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) { 2886 #ifdef INET 2887 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr)) 2888 mapped = 1; 2889 else 2890 #endif 2891 return (EINVAL); 2892 } 2893 2894 NET_EPOCH_ENTER(et); 2895 #ifdef INET 2896 if (mapped == 1) 2897 inp = in_pcblookup(&V_tcbinfo, 2898 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12], 2899 addrs[1].sin6_port, 2900 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12], 2901 addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL); 2902 else 2903 #endif 2904 inp = in6_pcblookup(&V_tcbinfo, 2905 &addrs[1].sin6_addr, addrs[1].sin6_port, 2906 &addrs[0].sin6_addr, addrs[0].sin6_port, 2907 INPLOOKUP_RLOCKPCB, NULL); 2908 NET_EPOCH_EXIT(et); 2909 if (inp != NULL) { 2910 if (inp->inp_socket == NULL) 2911 error = ENOENT; 2912 if (error == 0) 2913 error = cr_canseeinpcb(req->td->td_ucred, inp); 2914 if (error == 0) 2915 cru2x(inp->inp_cred, &xuc); 2916 INP_RUNLOCK(inp); 2917 } else 2918 error = ENOENT; 2919 if (error == 0) 2920 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 2921 return (error); 2922 } 2923 2924 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, 2925 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT, 2926 0, 0, tcp6_getcred, "S,xucred", 2927 "Get the xucred of a TCP6 connection"); 2928 #endif /* INET6 */ 2929 2930 #ifdef INET 2931 /* Path MTU to try next when a fragmentation-needed message is received. */ 2932 static inline int 2933 tcp_next_pmtu(const struct icmp *icp, const struct ip *ip) 2934 { 2935 int mtu = ntohs(icp->icmp_nextmtu); 2936 2937 /* If no alternative MTU was proposed, try the next smaller one. */ 2938 if (!mtu) 2939 mtu = ip_next_mtu(ntohs(ip->ip_len), 1); 2940 if (mtu < V_tcp_minmss + sizeof(struct tcpiphdr)) 2941 mtu = V_tcp_minmss + sizeof(struct tcpiphdr); 2942 2943 return (mtu); 2944 } 2945 2946 static void 2947 tcp_ctlinput_with_port(int cmd, struct sockaddr *sa, void *vip, uint16_t port) 2948 { 2949 struct ip *ip = vip; 2950 struct tcphdr *th; 2951 struct in_addr faddr; 2952 struct inpcb *inp; 2953 struct tcpcb *tp; 2954 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify; 2955 struct icmp *icp; 2956 struct in_conninfo inc; 2957 tcp_seq icmp_tcp_seq; 2958 int mtu; 2959 2960 faddr = ((struct sockaddr_in *)sa)->sin_addr; 2961 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) 2962 return; 2963 2964 if (cmd == PRC_MSGSIZE) 2965 notify = tcp_mtudisc_notify; 2966 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || 2967 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL || 2968 cmd == PRC_TIMXCEED_INTRANS) && ip) 2969 notify = tcp_drop_syn_sent; 2970 2971 /* 2972 * Hostdead is ugly because it goes linearly through all PCBs. 2973 * XXX: We never get this from ICMP, otherwise it makes an 2974 * excellent DoS attack on machines with many connections. 2975 */ 2976 else if (cmd == PRC_HOSTDEAD) 2977 ip = NULL; 2978 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) 2979 return; 2980 2981 if (ip == NULL) { 2982 in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify); 2983 return; 2984 } 2985 2986 icp = (struct icmp *)((caddr_t)ip - offsetof(struct icmp, icmp_ip)); 2987 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2988 inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport, ip->ip_src, 2989 th->th_sport, INPLOOKUP_WLOCKPCB, NULL); 2990 if (inp != NULL && PRC_IS_REDIRECT(cmd)) { 2991 /* signal EHOSTDOWN, as it flushes the cached route */ 2992 inp = (*notify)(inp, EHOSTDOWN); 2993 goto out; 2994 } 2995 icmp_tcp_seq = th->th_seq; 2996 if (inp != NULL) { 2997 if (!(inp->inp_flags & INP_TIMEWAIT) && 2998 !(inp->inp_flags & INP_DROPPED) && 2999 !(inp->inp_socket == NULL)) { 3000 tp = intotcpcb(inp); 3001 #ifdef TCP_OFFLOAD 3002 if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) { 3003 /* 3004 * MTU discovery for offloaded connections. Let 3005 * the TOE driver verify seq# and process it. 3006 */ 3007 mtu = tcp_next_pmtu(icp, ip); 3008 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu); 3009 goto out; 3010 } 3011 #endif 3012 if (tp->t_port != port) { 3013 goto out; 3014 } 3015 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) && 3016 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) { 3017 if (cmd == PRC_MSGSIZE) { 3018 /* 3019 * MTU discovery: we got a needfrag and 3020 * will potentially try a lower MTU. 3021 */ 3022 mtu = tcp_next_pmtu(icp, ip); 3023 3024 /* 3025 * Only process the offered MTU if it 3026 * is smaller than the current one. 3027 */ 3028 if (mtu < tp->t_maxseg + 3029 sizeof(struct tcpiphdr)) { 3030 bzero(&inc, sizeof(inc)); 3031 inc.inc_faddr = faddr; 3032 inc.inc_fibnum = 3033 inp->inp_inc.inc_fibnum; 3034 tcp_hc_updatemtu(&inc, mtu); 3035 tcp_mtudisc(inp, mtu); 3036 } 3037 } else 3038 inp = (*notify)(inp, 3039 inetctlerrmap[cmd]); 3040 } 3041 } 3042 } else { 3043 bzero(&inc, sizeof(inc)); 3044 inc.inc_fport = th->th_dport; 3045 inc.inc_lport = th->th_sport; 3046 inc.inc_faddr = faddr; 3047 inc.inc_laddr = ip->ip_src; 3048 syncache_unreach(&inc, icmp_tcp_seq, port); 3049 } 3050 out: 3051 if (inp != NULL) 3052 INP_WUNLOCK(inp); 3053 } 3054 3055 void 3056 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip) 3057 { 3058 tcp_ctlinput_with_port(cmd, sa, vip, htons(0)); 3059 } 3060 3061 void 3062 tcp_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *vip, void *unused) 3063 { 3064 /* Its a tunneled TCP over UDP icmp */ 3065 struct ip *outer_ip, *inner_ip; 3066 struct icmp *icmp; 3067 struct udphdr *udp; 3068 struct tcphdr *th, ttemp; 3069 int i_hlen, o_len; 3070 uint16_t port; 3071 3072 inner_ip = (struct ip *)vip; 3073 icmp = (struct icmp *)((caddr_t)inner_ip - 3074 (sizeof(struct icmp) - sizeof(struct ip))); 3075 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 3076 i_hlen = inner_ip->ip_hl << 2; 3077 o_len = ntohs(outer_ip->ip_len); 3078 if (o_len < 3079 (sizeof(struct ip) + 8 + i_hlen + sizeof(struct udphdr) + offsetof(struct tcphdr, th_ack))) { 3080 /* Not enough data present */ 3081 return; 3082 } 3083 /* Ok lets strip out the inner udphdr header by copying up on top of it the tcp hdr */ 3084 udp = (struct udphdr *)(((caddr_t)inner_ip) + i_hlen); 3085 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) { 3086 return; 3087 } 3088 port = udp->uh_dport; 3089 th = (struct tcphdr *)(udp + 1); 3090 memcpy(&ttemp, th, sizeof(struct tcphdr)); 3091 memcpy(udp, &ttemp, sizeof(struct tcphdr)); 3092 /* Now adjust down the size of the outer IP header */ 3093 o_len -= sizeof(struct udphdr); 3094 outer_ip->ip_len = htons(o_len); 3095 /* Now call in to the normal handling code */ 3096 tcp_ctlinput_with_port(cmd, sa, vip, port); 3097 } 3098 #endif /* INET */ 3099 3100 #ifdef INET6 3101 static inline int 3102 tcp6_next_pmtu(const struct icmp6_hdr *icmp6) 3103 { 3104 int mtu = ntohl(icmp6->icmp6_mtu); 3105 3106 /* 3107 * If no alternative MTU was proposed, or the proposed MTU was too 3108 * small, set to the min. 3109 */ 3110 if (mtu < IPV6_MMTU) 3111 mtu = IPV6_MMTU - 8; /* XXXNP: what is the adjustment for? */ 3112 return (mtu); 3113 } 3114 3115 static void 3116 tcp6_ctlinput_with_port(int cmd, struct sockaddr *sa, void *d, uint16_t port) 3117 { 3118 struct in6_addr *dst; 3119 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify; 3120 struct ip6_hdr *ip6; 3121 struct mbuf *m; 3122 struct inpcb *inp; 3123 struct tcpcb *tp; 3124 struct icmp6_hdr *icmp6; 3125 struct ip6ctlparam *ip6cp = NULL; 3126 const struct sockaddr_in6 *sa6_src = NULL; 3127 struct in_conninfo inc; 3128 struct tcp_ports { 3129 uint16_t th_sport; 3130 uint16_t th_dport; 3131 } t_ports; 3132 tcp_seq icmp_tcp_seq; 3133 unsigned int mtu; 3134 unsigned int off; 3135 3136 if (sa->sa_family != AF_INET6 || 3137 sa->sa_len != sizeof(struct sockaddr_in6)) 3138 return; 3139 3140 /* if the parameter is from icmp6, decode it. */ 3141 if (d != NULL) { 3142 ip6cp = (struct ip6ctlparam *)d; 3143 icmp6 = ip6cp->ip6c_icmp6; 3144 m = ip6cp->ip6c_m; 3145 ip6 = ip6cp->ip6c_ip6; 3146 off = ip6cp->ip6c_off; 3147 sa6_src = ip6cp->ip6c_src; 3148 dst = ip6cp->ip6c_finaldst; 3149 } else { 3150 m = NULL; 3151 ip6 = NULL; 3152 off = 0; /* fool gcc */ 3153 sa6_src = &sa6_any; 3154 dst = NULL; 3155 } 3156 3157 if (cmd == PRC_MSGSIZE) 3158 notify = tcp_mtudisc_notify; 3159 else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || 3160 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL || 3161 cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL) 3162 notify = tcp_drop_syn_sent; 3163 3164 /* 3165 * Hostdead is ugly because it goes linearly through all PCBs. 3166 * XXX: We never get this from ICMP, otherwise it makes an 3167 * excellent DoS attack on machines with many connections. 3168 */ 3169 else if (cmd == PRC_HOSTDEAD) 3170 ip6 = NULL; 3171 else if ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0) 3172 return; 3173 3174 if (ip6 == NULL) { 3175 in6_pcbnotify(&V_tcbinfo, sa, 0, 3176 (const struct sockaddr *)sa6_src, 3177 0, cmd, NULL, notify); 3178 return; 3179 } 3180 3181 /* Check if we can safely get the ports from the tcp hdr */ 3182 if (m == NULL || 3183 (m->m_pkthdr.len < 3184 (int32_t) (off + sizeof(struct tcp_ports)))) { 3185 return; 3186 } 3187 bzero(&t_ports, sizeof(struct tcp_ports)); 3188 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports); 3189 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport, 3190 &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL); 3191 if (inp != NULL && PRC_IS_REDIRECT(cmd)) { 3192 /* signal EHOSTDOWN, as it flushes the cached route */ 3193 inp = (*notify)(inp, EHOSTDOWN); 3194 goto out; 3195 } 3196 off += sizeof(struct tcp_ports); 3197 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) { 3198 goto out; 3199 } 3200 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq); 3201 if (inp != NULL) { 3202 if (!(inp->inp_flags & INP_TIMEWAIT) && 3203 !(inp->inp_flags & INP_DROPPED) && 3204 !(inp->inp_socket == NULL)) { 3205 tp = intotcpcb(inp); 3206 #ifdef TCP_OFFLOAD 3207 if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) { 3208 /* MTU discovery for offloaded connections. */ 3209 mtu = tcp6_next_pmtu(icmp6); 3210 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu); 3211 goto out; 3212 } 3213 #endif 3214 if (tp->t_port != port) { 3215 goto out; 3216 } 3217 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) && 3218 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) { 3219 if (cmd == PRC_MSGSIZE) { 3220 /* 3221 * MTU discovery: 3222 * If we got a needfrag set the MTU 3223 * in the route to the suggested new 3224 * value (if given) and then notify. 3225 */ 3226 mtu = tcp6_next_pmtu(icmp6); 3227 3228 bzero(&inc, sizeof(inc)); 3229 inc.inc_fibnum = M_GETFIB(m); 3230 inc.inc_flags |= INC_ISIPV6; 3231 inc.inc6_faddr = *dst; 3232 if (in6_setscope(&inc.inc6_faddr, 3233 m->m_pkthdr.rcvif, NULL)) 3234 goto out; 3235 /* 3236 * Only process the offered MTU if it 3237 * is smaller than the current one. 3238 */ 3239 if (mtu < tp->t_maxseg + 3240 sizeof (struct tcphdr) + 3241 sizeof (struct ip6_hdr)) { 3242 tcp_hc_updatemtu(&inc, mtu); 3243 tcp_mtudisc(inp, mtu); 3244 ICMP6STAT_INC(icp6s_pmtuchg); 3245 } 3246 } else 3247 inp = (*notify)(inp, 3248 inet6ctlerrmap[cmd]); 3249 } 3250 } 3251 } else { 3252 bzero(&inc, sizeof(inc)); 3253 inc.inc_fibnum = M_GETFIB(m); 3254 inc.inc_flags |= INC_ISIPV6; 3255 inc.inc_fport = t_ports.th_dport; 3256 inc.inc_lport = t_ports.th_sport; 3257 inc.inc6_faddr = *dst; 3258 inc.inc6_laddr = ip6->ip6_src; 3259 syncache_unreach(&inc, icmp_tcp_seq, port); 3260 } 3261 out: 3262 if (inp != NULL) 3263 INP_WUNLOCK(inp); 3264 } 3265 3266 void 3267 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d) 3268 { 3269 tcp6_ctlinput_with_port(cmd, sa, d, htons(0)); 3270 } 3271 3272 void 3273 tcp6_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *d, void *unused) 3274 { 3275 struct ip6ctlparam *ip6cp; 3276 struct mbuf *m; 3277 struct udphdr *udp; 3278 uint16_t port; 3279 3280 ip6cp = (struct ip6ctlparam *)d; 3281 m = m_pulldown(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(struct udphdr), NULL); 3282 if (m == NULL) { 3283 return; 3284 } 3285 udp = mtod(m, struct udphdr *); 3286 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) { 3287 return; 3288 } 3289 port = udp->uh_dport; 3290 m_adj(m, sizeof(struct udphdr)); 3291 if ((m->m_flags & M_PKTHDR) == 0) { 3292 ip6cp->ip6c_m->m_pkthdr.len -= sizeof(struct udphdr); 3293 } 3294 /* Now call in to the normal handling code */ 3295 tcp6_ctlinput_with_port(cmd, sa, d, port); 3296 } 3297 3298 #endif /* INET6 */ 3299 3300 static uint32_t 3301 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len) 3302 { 3303 SIPHASH_CTX ctx; 3304 uint32_t hash[2]; 3305 3306 KASSERT(len >= SIPHASH_KEY_LENGTH, 3307 ("%s: keylen %u too short ", __func__, len)); 3308 SipHash24_Init(&ctx); 3309 SipHash_SetKey(&ctx, (uint8_t *)key); 3310 SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t)); 3311 SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t)); 3312 switch (inc->inc_flags & INC_ISIPV6) { 3313 #ifdef INET 3314 case 0: 3315 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr)); 3316 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr)); 3317 break; 3318 #endif 3319 #ifdef INET6 3320 case INC_ISIPV6: 3321 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr)); 3322 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr)); 3323 break; 3324 #endif 3325 } 3326 SipHash_Final((uint8_t *)hash, &ctx); 3327 3328 return (hash[0] ^ hash[1]); 3329 } 3330 3331 uint32_t 3332 tcp_new_ts_offset(struct in_conninfo *inc) 3333 { 3334 struct in_conninfo inc_store, *local_inc; 3335 3336 if (!V_tcp_ts_offset_per_conn) { 3337 memcpy(&inc_store, inc, sizeof(struct in_conninfo)); 3338 inc_store.inc_lport = 0; 3339 inc_store.inc_fport = 0; 3340 local_inc = &inc_store; 3341 } else { 3342 local_inc = inc; 3343 } 3344 return (tcp_keyed_hash(local_inc, V_ts_offset_secret, 3345 sizeof(V_ts_offset_secret))); 3346 } 3347 3348 /* 3349 * Following is where TCP initial sequence number generation occurs. 3350 * 3351 * There are two places where we must use initial sequence numbers: 3352 * 1. In SYN-ACK packets. 3353 * 2. In SYN packets. 3354 * 3355 * All ISNs for SYN-ACK packets are generated by the syncache. See 3356 * tcp_syncache.c for details. 3357 * 3358 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling 3359 * depends on this property. In addition, these ISNs should be 3360 * unguessable so as to prevent connection hijacking. To satisfy 3361 * the requirements of this situation, the algorithm outlined in 3362 * RFC 1948 is used, with only small modifications. 3363 * 3364 * Implementation details: 3365 * 3366 * Time is based off the system timer, and is corrected so that it 3367 * increases by one megabyte per second. This allows for proper 3368 * recycling on high speed LANs while still leaving over an hour 3369 * before rollover. 3370 * 3371 * As reading the *exact* system time is too expensive to be done 3372 * whenever setting up a TCP connection, we increment the time 3373 * offset in two ways. First, a small random positive increment 3374 * is added to isn_offset for each connection that is set up. 3375 * Second, the function tcp_isn_tick fires once per clock tick 3376 * and increments isn_offset as necessary so that sequence numbers 3377 * are incremented at approximately ISN_BYTES_PER_SECOND. The 3378 * random positive increments serve only to ensure that the same 3379 * exact sequence number is never sent out twice (as could otherwise 3380 * happen when a port is recycled in less than the system tick 3381 * interval.) 3382 * 3383 * net.inet.tcp.isn_reseed_interval controls the number of seconds 3384 * between seeding of isn_secret. This is normally set to zero, 3385 * as reseeding should not be necessary. 3386 * 3387 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset, 3388 * isn_offset_old, and isn_ctx is performed using the ISN lock. In 3389 * general, this means holding an exclusive (write) lock. 3390 */ 3391 3392 #define ISN_BYTES_PER_SECOND 1048576 3393 #define ISN_STATIC_INCREMENT 4096 3394 #define ISN_RANDOM_INCREMENT (4096 - 1) 3395 #define ISN_SECRET_LENGTH SIPHASH_KEY_LENGTH 3396 3397 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]); 3398 VNET_DEFINE_STATIC(int, isn_last); 3399 VNET_DEFINE_STATIC(int, isn_last_reseed); 3400 VNET_DEFINE_STATIC(u_int32_t, isn_offset); 3401 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old); 3402 3403 #define V_isn_secret VNET(isn_secret) 3404 #define V_isn_last VNET(isn_last) 3405 #define V_isn_last_reseed VNET(isn_last_reseed) 3406 #define V_isn_offset VNET(isn_offset) 3407 #define V_isn_offset_old VNET(isn_offset_old) 3408 3409 tcp_seq 3410 tcp_new_isn(struct in_conninfo *inc) 3411 { 3412 tcp_seq new_isn; 3413 u_int32_t projected_offset; 3414 3415 ISN_LOCK(); 3416 /* Seed if this is the first use, reseed if requested. */ 3417 if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) && 3418 (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz) 3419 < (u_int)ticks))) { 3420 arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0); 3421 V_isn_last_reseed = ticks; 3422 } 3423 3424 /* Compute the hash and return the ISN. */ 3425 new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret, 3426 sizeof(V_isn_secret)); 3427 V_isn_offset += ISN_STATIC_INCREMENT + 3428 (arc4random() & ISN_RANDOM_INCREMENT); 3429 if (ticks != V_isn_last) { 3430 projected_offset = V_isn_offset_old + 3431 ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last); 3432 if (SEQ_GT(projected_offset, V_isn_offset)) 3433 V_isn_offset = projected_offset; 3434 V_isn_offset_old = V_isn_offset; 3435 V_isn_last = ticks; 3436 } 3437 new_isn += V_isn_offset; 3438 ISN_UNLOCK(); 3439 return (new_isn); 3440 } 3441 3442 /* 3443 * When a specific ICMP unreachable message is received and the 3444 * connection state is SYN-SENT, drop the connection. This behavior 3445 * is controlled by the icmp_may_rst sysctl. 3446 */ 3447 struct inpcb * 3448 tcp_drop_syn_sent(struct inpcb *inp, int errno) 3449 { 3450 struct tcpcb *tp; 3451 3452 NET_EPOCH_ASSERT(); 3453 INP_WLOCK_ASSERT(inp); 3454 3455 if ((inp->inp_flags & INP_TIMEWAIT) || 3456 (inp->inp_flags & INP_DROPPED)) 3457 return (inp); 3458 3459 tp = intotcpcb(inp); 3460 if (tp->t_state != TCPS_SYN_SENT) 3461 return (inp); 3462 3463 if (IS_FASTOPEN(tp->t_flags)) 3464 tcp_fastopen_disable_path(tp); 3465 3466 tp = tcp_drop(tp, errno); 3467 if (tp != NULL) 3468 return (inp); 3469 else 3470 return (NULL); 3471 } 3472 3473 /* 3474 * When `need fragmentation' ICMP is received, update our idea of the MSS 3475 * based on the new value. Also nudge TCP to send something, since we 3476 * know the packet we just sent was dropped. 3477 * This duplicates some code in the tcp_mss() function in tcp_input.c. 3478 */ 3479 static struct inpcb * 3480 tcp_mtudisc_notify(struct inpcb *inp, int error) 3481 { 3482 3483 tcp_mtudisc(inp, -1); 3484 return (inp); 3485 } 3486 3487 static void 3488 tcp_mtudisc(struct inpcb *inp, int mtuoffer) 3489 { 3490 struct tcpcb *tp; 3491 struct socket *so; 3492 3493 INP_WLOCK_ASSERT(inp); 3494 if ((inp->inp_flags & INP_TIMEWAIT) || 3495 (inp->inp_flags & INP_DROPPED)) 3496 return; 3497 3498 tp = intotcpcb(inp); 3499 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL")); 3500 3501 tcp_mss_update(tp, -1, mtuoffer, NULL, NULL); 3502 3503 so = inp->inp_socket; 3504 SOCKBUF_LOCK(&so->so_snd); 3505 /* If the mss is larger than the socket buffer, decrease the mss. */ 3506 if (so->so_snd.sb_hiwat < tp->t_maxseg) 3507 tp->t_maxseg = so->so_snd.sb_hiwat; 3508 SOCKBUF_UNLOCK(&so->so_snd); 3509 3510 TCPSTAT_INC(tcps_mturesent); 3511 tp->t_rtttime = 0; 3512 tp->snd_nxt = tp->snd_una; 3513 tcp_free_sackholes(tp); 3514 tp->snd_recover = tp->snd_max; 3515 if (tp->t_flags & TF_SACK_PERMIT) 3516 EXIT_FASTRECOVERY(tp->t_flags); 3517 if (tp->t_fb->tfb_tcp_mtu_chg != NULL) { 3518 /* 3519 * Conceptually the snd_nxt setting 3520 * and freeing sack holes should 3521 * be done by the default stacks 3522 * own tfb_tcp_mtu_chg(). 3523 */ 3524 tp->t_fb->tfb_tcp_mtu_chg(tp); 3525 } 3526 tp->t_fb->tfb_tcp_output(tp); 3527 } 3528 3529 #ifdef INET 3530 /* 3531 * Look-up the routing entry to the peer of this inpcb. If no route 3532 * is found and it cannot be allocated, then return 0. This routine 3533 * is called by TCP routines that access the rmx structure and by 3534 * tcp_mss_update to get the peer/interface MTU. 3535 */ 3536 uint32_t 3537 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap) 3538 { 3539 struct nhop_object *nh; 3540 struct ifnet *ifp; 3541 uint32_t maxmtu = 0; 3542 3543 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer")); 3544 3545 if (inc->inc_faddr.s_addr != INADDR_ANY) { 3546 nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0); 3547 if (nh == NULL) 3548 return (0); 3549 3550 ifp = nh->nh_ifp; 3551 maxmtu = nh->nh_mtu; 3552 3553 /* Report additional interface capabilities. */ 3554 if (cap != NULL) { 3555 if (ifp->if_capenable & IFCAP_TSO4 && 3556 ifp->if_hwassist & CSUM_TSO) { 3557 cap->ifcap |= CSUM_TSO; 3558 cap->tsomax = ifp->if_hw_tsomax; 3559 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount; 3560 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize; 3561 } 3562 } 3563 } 3564 return (maxmtu); 3565 } 3566 #endif /* INET */ 3567 3568 #ifdef INET6 3569 uint32_t 3570 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap) 3571 { 3572 struct nhop_object *nh; 3573 struct in6_addr dst6; 3574 uint32_t scopeid; 3575 struct ifnet *ifp; 3576 uint32_t maxmtu = 0; 3577 3578 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer")); 3579 3580 if (inc->inc_flags & INC_IPV6MINMTU) 3581 return (IPV6_MMTU); 3582 3583 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) { 3584 in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid); 3585 nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0); 3586 if (nh == NULL) 3587 return (0); 3588 3589 ifp = nh->nh_ifp; 3590 maxmtu = nh->nh_mtu; 3591 3592 /* Report additional interface capabilities. */ 3593 if (cap != NULL) { 3594 if (ifp->if_capenable & IFCAP_TSO6 && 3595 ifp->if_hwassist & CSUM_TSO) { 3596 cap->ifcap |= CSUM_TSO; 3597 cap->tsomax = ifp->if_hw_tsomax; 3598 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount; 3599 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize; 3600 } 3601 } 3602 } 3603 3604 return (maxmtu); 3605 } 3606 3607 /* 3608 * Handle setsockopt(IPV6_USE_MIN_MTU) by a TCP stack. 3609 * 3610 * XXXGL: we are updating inpcb here with INC_IPV6MINMTU flag. 3611 * The right place to do that is ip6_setpktopt() that has just been 3612 * executed. By the way it just filled ip6po_minmtu for us. 3613 */ 3614 void 3615 tcp6_use_min_mtu(struct tcpcb *tp) 3616 { 3617 struct inpcb *inp = tp->t_inpcb; 3618 3619 INP_WLOCK_ASSERT(inp); 3620 /* 3621 * In case of the IPV6_USE_MIN_MTU socket 3622 * option, the INC_IPV6MINMTU flag to announce 3623 * a corresponding MSS during the initial 3624 * handshake. If the TCP connection is not in 3625 * the front states, just reduce the MSS being 3626 * used. This avoids the sending of TCP 3627 * segments which will be fragmented at the 3628 * IPv6 layer. 3629 */ 3630 inp->inp_inc.inc_flags |= INC_IPV6MINMTU; 3631 if ((tp->t_state >= TCPS_SYN_SENT) && 3632 (inp->inp_inc.inc_flags & INC_ISIPV6)) { 3633 struct ip6_pktopts *opt; 3634 3635 opt = inp->in6p_outputopts; 3636 if (opt != NULL && opt->ip6po_minmtu == IP6PO_MINMTU_ALL && 3637 tp->t_maxseg > TCP6_MSS) 3638 tp->t_maxseg = TCP6_MSS; 3639 } 3640 } 3641 #endif /* INET6 */ 3642 3643 /* 3644 * Calculate effective SMSS per RFC5681 definition for a given TCP 3645 * connection at its current state, taking into account SACK and etc. 3646 */ 3647 u_int 3648 tcp_maxseg(const struct tcpcb *tp) 3649 { 3650 u_int optlen; 3651 3652 if (tp->t_flags & TF_NOOPT) 3653 return (tp->t_maxseg); 3654 3655 /* 3656 * Here we have a simplified code from tcp_addoptions(), 3657 * without a proper loop, and having most of paddings hardcoded. 3658 * We might make mistakes with padding here in some edge cases, 3659 * but this is harmless, since result of tcp_maxseg() is used 3660 * only in cwnd and ssthresh estimations. 3661 */ 3662 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 3663 if (tp->t_flags & TF_RCVD_TSTMP) 3664 optlen = TCPOLEN_TSTAMP_APPA; 3665 else 3666 optlen = 0; 3667 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3668 if (tp->t_flags & TF_SIGNATURE) 3669 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE); 3670 #endif 3671 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) { 3672 optlen += TCPOLEN_SACKHDR; 3673 optlen += tp->rcv_numsacks * TCPOLEN_SACK; 3674 optlen = PADTCPOLEN(optlen); 3675 } 3676 } else { 3677 if (tp->t_flags & TF_REQ_TSTMP) 3678 optlen = TCPOLEN_TSTAMP_APPA; 3679 else 3680 optlen = PADTCPOLEN(TCPOLEN_MAXSEG); 3681 if (tp->t_flags & TF_REQ_SCALE) 3682 optlen += PADTCPOLEN(TCPOLEN_WINDOW); 3683 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3684 if (tp->t_flags & TF_SIGNATURE) 3685 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE); 3686 #endif 3687 if (tp->t_flags & TF_SACK_PERMIT) 3688 optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED); 3689 } 3690 #undef PAD 3691 optlen = min(optlen, TCP_MAXOLEN); 3692 return (tp->t_maxseg - optlen); 3693 } 3694 3695 3696 u_int 3697 tcp_fixed_maxseg(const struct tcpcb *tp) 3698 { 3699 int optlen; 3700 3701 if (tp->t_flags & TF_NOOPT) 3702 return (tp->t_maxseg); 3703 3704 /* 3705 * Here we have a simplified code from tcp_addoptions(), 3706 * without a proper loop, and having most of paddings hardcoded. 3707 * We only consider fixed options that we would send every 3708 * time I.e. SACK is not considered. This is important 3709 * for cc modules to figure out what the modulo of the 3710 * cwnd should be. 3711 */ 3712 #define PAD(len) ((((len) / 4) + !!((len) % 4)) * 4) 3713 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 3714 if (tp->t_flags & TF_RCVD_TSTMP) 3715 optlen = TCPOLEN_TSTAMP_APPA; 3716 else 3717 optlen = 0; 3718 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3719 if (tp->t_flags & TF_SIGNATURE) 3720 optlen += PAD(TCPOLEN_SIGNATURE); 3721 #endif 3722 } else { 3723 if (tp->t_flags & TF_REQ_TSTMP) 3724 optlen = TCPOLEN_TSTAMP_APPA; 3725 else 3726 optlen = PAD(TCPOLEN_MAXSEG); 3727 if (tp->t_flags & TF_REQ_SCALE) 3728 optlen += PAD(TCPOLEN_WINDOW); 3729 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 3730 if (tp->t_flags & TF_SIGNATURE) 3731 optlen += PAD(TCPOLEN_SIGNATURE); 3732 #endif 3733 if (tp->t_flags & TF_SACK_PERMIT) 3734 optlen += PAD(TCPOLEN_SACK_PERMITTED); 3735 } 3736 #undef PAD 3737 optlen = min(optlen, TCP_MAXOLEN); 3738 return (tp->t_maxseg - optlen); 3739 } 3740 3741 3742 3743 static int 3744 sysctl_drop(SYSCTL_HANDLER_ARGS) 3745 { 3746 /* addrs[0] is a foreign socket, addrs[1] is a local one. */ 3747 struct sockaddr_storage addrs[2]; 3748 struct inpcb *inp; 3749 struct tcpcb *tp; 3750 struct tcptw *tw; 3751 struct sockaddr_in *fin, *lin; 3752 struct epoch_tracker et; 3753 #ifdef INET6 3754 struct sockaddr_in6 *fin6, *lin6; 3755 #endif 3756 int error; 3757 3758 inp = NULL; 3759 fin = lin = NULL; 3760 #ifdef INET6 3761 fin6 = lin6 = NULL; 3762 #endif 3763 error = 0; 3764 3765 if (req->oldptr != NULL || req->oldlen != 0) 3766 return (EINVAL); 3767 if (req->newptr == NULL) 3768 return (EPERM); 3769 if (req->newlen < sizeof(addrs)) 3770 return (ENOMEM); 3771 error = SYSCTL_IN(req, &addrs, sizeof(addrs)); 3772 if (error) 3773 return (error); 3774 3775 switch (addrs[0].ss_family) { 3776 #ifdef INET6 3777 case AF_INET6: 3778 fin6 = (struct sockaddr_in6 *)&addrs[0]; 3779 lin6 = (struct sockaddr_in6 *)&addrs[1]; 3780 if (fin6->sin6_len != sizeof(struct sockaddr_in6) || 3781 lin6->sin6_len != sizeof(struct sockaddr_in6)) 3782 return (EINVAL); 3783 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) { 3784 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) 3785 return (EINVAL); 3786 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]); 3787 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]); 3788 fin = (struct sockaddr_in *)&addrs[0]; 3789 lin = (struct sockaddr_in *)&addrs[1]; 3790 break; 3791 } 3792 error = sa6_embedscope(fin6, V_ip6_use_defzone); 3793 if (error) 3794 return (error); 3795 error = sa6_embedscope(lin6, V_ip6_use_defzone); 3796 if (error) 3797 return (error); 3798 break; 3799 #endif 3800 #ifdef INET 3801 case AF_INET: 3802 fin = (struct sockaddr_in *)&addrs[0]; 3803 lin = (struct sockaddr_in *)&addrs[1]; 3804 if (fin->sin_len != sizeof(struct sockaddr_in) || 3805 lin->sin_len != sizeof(struct sockaddr_in)) 3806 return (EINVAL); 3807 break; 3808 #endif 3809 default: 3810 return (EINVAL); 3811 } 3812 NET_EPOCH_ENTER(et); 3813 switch (addrs[0].ss_family) { 3814 #ifdef INET6 3815 case AF_INET6: 3816 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr, 3817 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 3818 INPLOOKUP_WLOCKPCB, NULL); 3819 break; 3820 #endif 3821 #ifdef INET 3822 case AF_INET: 3823 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port, 3824 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL); 3825 break; 3826 #endif 3827 } 3828 if (inp != NULL) { 3829 if (inp->inp_flags & INP_TIMEWAIT) { 3830 /* 3831 * XXXRW: There currently exists a state where an 3832 * inpcb is present, but its timewait state has been 3833 * discarded. For now, don't allow dropping of this 3834 * type of inpcb. 3835 */ 3836 tw = intotw(inp); 3837 if (tw != NULL) 3838 tcp_twclose(tw, 0); 3839 else 3840 INP_WUNLOCK(inp); 3841 } else if ((inp->inp_flags & INP_DROPPED) == 0 && 3842 !SOLISTENING(inp->inp_socket)) { 3843 tp = intotcpcb(inp); 3844 tp = tcp_drop(tp, ECONNABORTED); 3845 if (tp != NULL) 3846 INP_WUNLOCK(inp); 3847 } else 3848 INP_WUNLOCK(inp); 3849 } else 3850 error = ESRCH; 3851 NET_EPOCH_EXIT(et); 3852 return (error); 3853 } 3854 3855 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop, 3856 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3857 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "", 3858 "Drop TCP connection"); 3859 3860 #ifdef KERN_TLS 3861 static int 3862 sysctl_switch_tls(SYSCTL_HANDLER_ARGS) 3863 { 3864 /* addrs[0] is a foreign socket, addrs[1] is a local one. */ 3865 struct sockaddr_storage addrs[2]; 3866 struct inpcb *inp; 3867 struct sockaddr_in *fin, *lin; 3868 struct epoch_tracker et; 3869 #ifdef INET6 3870 struct sockaddr_in6 *fin6, *lin6; 3871 #endif 3872 int error; 3873 3874 inp = NULL; 3875 fin = lin = NULL; 3876 #ifdef INET6 3877 fin6 = lin6 = NULL; 3878 #endif 3879 error = 0; 3880 3881 if (req->oldptr != NULL || req->oldlen != 0) 3882 return (EINVAL); 3883 if (req->newptr == NULL) 3884 return (EPERM); 3885 if (req->newlen < sizeof(addrs)) 3886 return (ENOMEM); 3887 error = SYSCTL_IN(req, &addrs, sizeof(addrs)); 3888 if (error) 3889 return (error); 3890 3891 switch (addrs[0].ss_family) { 3892 #ifdef INET6 3893 case AF_INET6: 3894 fin6 = (struct sockaddr_in6 *)&addrs[0]; 3895 lin6 = (struct sockaddr_in6 *)&addrs[1]; 3896 if (fin6->sin6_len != sizeof(struct sockaddr_in6) || 3897 lin6->sin6_len != sizeof(struct sockaddr_in6)) 3898 return (EINVAL); 3899 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) { 3900 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) 3901 return (EINVAL); 3902 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]); 3903 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]); 3904 fin = (struct sockaddr_in *)&addrs[0]; 3905 lin = (struct sockaddr_in *)&addrs[1]; 3906 break; 3907 } 3908 error = sa6_embedscope(fin6, V_ip6_use_defzone); 3909 if (error) 3910 return (error); 3911 error = sa6_embedscope(lin6, V_ip6_use_defzone); 3912 if (error) 3913 return (error); 3914 break; 3915 #endif 3916 #ifdef INET 3917 case AF_INET: 3918 fin = (struct sockaddr_in *)&addrs[0]; 3919 lin = (struct sockaddr_in *)&addrs[1]; 3920 if (fin->sin_len != sizeof(struct sockaddr_in) || 3921 lin->sin_len != sizeof(struct sockaddr_in)) 3922 return (EINVAL); 3923 break; 3924 #endif 3925 default: 3926 return (EINVAL); 3927 } 3928 NET_EPOCH_ENTER(et); 3929 switch (addrs[0].ss_family) { 3930 #ifdef INET6 3931 case AF_INET6: 3932 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr, 3933 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 3934 INPLOOKUP_WLOCKPCB, NULL); 3935 break; 3936 #endif 3937 #ifdef INET 3938 case AF_INET: 3939 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port, 3940 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL); 3941 break; 3942 #endif 3943 } 3944 NET_EPOCH_EXIT(et); 3945 if (inp != NULL) { 3946 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) != 0 || 3947 inp->inp_socket == NULL) { 3948 error = ECONNRESET; 3949 INP_WUNLOCK(inp); 3950 } else { 3951 struct socket *so; 3952 3953 so = inp->inp_socket; 3954 soref(so); 3955 error = ktls_set_tx_mode(so, 3956 arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET); 3957 INP_WUNLOCK(inp); 3958 sorele(so); 3959 } 3960 } else 3961 error = ESRCH; 3962 return (error); 3963 } 3964 3965 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls, 3966 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3967 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "", 3968 "Switch TCP connection to SW TLS"); 3969 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls, 3970 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP | 3971 CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "", 3972 "Switch TCP connection to ifnet TLS"); 3973 #endif 3974 3975 /* 3976 * Generate a standardized TCP log line for use throughout the 3977 * tcp subsystem. Memory allocation is done with M_NOWAIT to 3978 * allow use in the interrupt context. 3979 * 3980 * NB: The caller MUST free(s, M_TCPLOG) the returned string. 3981 * NB: The function may return NULL if memory allocation failed. 3982 * 3983 * Due to header inclusion and ordering limitations the struct ip 3984 * and ip6_hdr pointers have to be passed as void pointers. 3985 */ 3986 char * 3987 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr, 3988 const void *ip6hdr) 3989 { 3990 3991 /* Is logging enabled? */ 3992 if (V_tcp_log_in_vain == 0) 3993 return (NULL); 3994 3995 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr)); 3996 } 3997 3998 char * 3999 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr, 4000 const void *ip6hdr) 4001 { 4002 4003 /* Is logging enabled? */ 4004 if (tcp_log_debug == 0) 4005 return (NULL); 4006 4007 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr)); 4008 } 4009 4010 static char * 4011 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr, 4012 const void *ip6hdr) 4013 { 4014 char *s, *sp; 4015 size_t size; 4016 struct ip *ip; 4017 #ifdef INET6 4018 const struct ip6_hdr *ip6; 4019 4020 ip6 = (const struct ip6_hdr *)ip6hdr; 4021 #endif /* INET6 */ 4022 ip = (struct ip *)ip4hdr; 4023 4024 /* 4025 * The log line looks like this: 4026 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>" 4027 */ 4028 size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") + 4029 sizeof(PRINT_TH_FLAGS) + 1 + 4030 #ifdef INET6 4031 2 * INET6_ADDRSTRLEN; 4032 #else 4033 2 * INET_ADDRSTRLEN; 4034 #endif /* INET6 */ 4035 4036 s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT); 4037 if (s == NULL) 4038 return (NULL); 4039 4040 strcat(s, "TCP: ["); 4041 sp = s + strlen(s); 4042 4043 if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) { 4044 inet_ntoa_r(inc->inc_faddr, sp); 4045 sp = s + strlen(s); 4046 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport)); 4047 sp = s + strlen(s); 4048 inet_ntoa_r(inc->inc_laddr, sp); 4049 sp = s + strlen(s); 4050 sprintf(sp, "]:%i", ntohs(inc->inc_lport)); 4051 #ifdef INET6 4052 } else if (inc) { 4053 ip6_sprintf(sp, &inc->inc6_faddr); 4054 sp = s + strlen(s); 4055 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport)); 4056 sp = s + strlen(s); 4057 ip6_sprintf(sp, &inc->inc6_laddr); 4058 sp = s + strlen(s); 4059 sprintf(sp, "]:%i", ntohs(inc->inc_lport)); 4060 } else if (ip6 && th) { 4061 ip6_sprintf(sp, &ip6->ip6_src); 4062 sp = s + strlen(s); 4063 sprintf(sp, "]:%i to [", ntohs(th->th_sport)); 4064 sp = s + strlen(s); 4065 ip6_sprintf(sp, &ip6->ip6_dst); 4066 sp = s + strlen(s); 4067 sprintf(sp, "]:%i", ntohs(th->th_dport)); 4068 #endif /* INET6 */ 4069 #ifdef INET 4070 } else if (ip && th) { 4071 inet_ntoa_r(ip->ip_src, sp); 4072 sp = s + strlen(s); 4073 sprintf(sp, "]:%i to [", ntohs(th->th_sport)); 4074 sp = s + strlen(s); 4075 inet_ntoa_r(ip->ip_dst, sp); 4076 sp = s + strlen(s); 4077 sprintf(sp, "]:%i", ntohs(th->th_dport)); 4078 #endif /* INET */ 4079 } else { 4080 free(s, M_TCPLOG); 4081 return (NULL); 4082 } 4083 sp = s + strlen(s); 4084 if (th) 4085 sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS); 4086 if (*(s + size - 1) != '\0') 4087 panic("%s: string too long", __func__); 4088 return (s); 4089 } 4090 4091 /* 4092 * A subroutine which makes it easy to track TCP state changes with DTrace. 4093 * This function shouldn't be called for t_state initializations that don't 4094 * correspond to actual TCP state transitions. 4095 */ 4096 void 4097 tcp_state_change(struct tcpcb *tp, int newstate) 4098 { 4099 #if defined(KDTRACE_HOOKS) 4100 int pstate = tp->t_state; 4101 #endif 4102 4103 TCPSTATES_DEC(tp->t_state); 4104 TCPSTATES_INC(newstate); 4105 tp->t_state = newstate; 4106 TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate); 4107 } 4108 4109 /* 4110 * Create an external-format (``xtcpcb'') structure using the information in 4111 * the kernel-format tcpcb structure pointed to by tp. This is done to 4112 * reduce the spew of irrelevant information over this interface, to isolate 4113 * user code from changes in the kernel structure, and potentially to provide 4114 * information-hiding if we decide that some of this information should be 4115 * hidden from users. 4116 */ 4117 void 4118 tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt) 4119 { 4120 struct tcpcb *tp = intotcpcb(inp); 4121 struct tcptw *tw = intotw(inp); 4122 sbintime_t now; 4123 4124 bzero(xt, sizeof(*xt)); 4125 if (inp->inp_flags & INP_TIMEWAIT) { 4126 xt->t_state = TCPS_TIME_WAIT; 4127 xt->xt_encaps_port = tw->t_port; 4128 } else { 4129 xt->t_state = tp->t_state; 4130 xt->t_logstate = tp->t_logstate; 4131 xt->t_flags = tp->t_flags; 4132 xt->t_sndzerowin = tp->t_sndzerowin; 4133 xt->t_sndrexmitpack = tp->t_sndrexmitpack; 4134 xt->t_rcvoopack = tp->t_rcvoopack; 4135 xt->t_rcv_wnd = tp->rcv_wnd; 4136 xt->t_snd_wnd = tp->snd_wnd; 4137 xt->t_snd_cwnd = tp->snd_cwnd; 4138 xt->t_snd_ssthresh = tp->snd_ssthresh; 4139 xt->t_dsack_bytes = tp->t_dsack_bytes; 4140 xt->t_dsack_tlp_bytes = tp->t_dsack_tlp_bytes; 4141 xt->t_dsack_pack = tp->t_dsack_pack; 4142 xt->t_maxseg = tp->t_maxseg; 4143 xt->xt_ecn = (tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0 + 4144 (tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0; 4145 4146 now = getsbinuptime(); 4147 #define COPYTIMER(ttt) do { \ 4148 if (callout_active(&tp->t_timers->ttt)) \ 4149 xt->ttt = (tp->t_timers->ttt.c_time - now) / \ 4150 SBT_1MS; \ 4151 else \ 4152 xt->ttt = 0; \ 4153 } while (0) 4154 COPYTIMER(tt_delack); 4155 COPYTIMER(tt_rexmt); 4156 COPYTIMER(tt_persist); 4157 COPYTIMER(tt_keep); 4158 COPYTIMER(tt_2msl); 4159 #undef COPYTIMER 4160 xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz; 4161 4162 xt->xt_encaps_port = tp->t_port; 4163 bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack, 4164 TCP_FUNCTION_NAME_LEN_MAX); 4165 bcopy(CC_ALGO(tp)->name, xt->xt_cc, 4166 TCP_CA_NAME_MAX); 4167 #ifdef TCP_BLACKBOX 4168 (void)tcp_log_get_id(tp, xt->xt_logid); 4169 #endif 4170 } 4171 4172 xt->xt_len = sizeof(struct xtcpcb); 4173 in_pcbtoxinpcb(inp, &xt->xt_inp); 4174 if (inp->inp_socket == NULL) 4175 xt->xt_inp.xi_socket.xso_protocol = IPPROTO_TCP; 4176 } 4177 4178 void 4179 tcp_log_end_status(struct tcpcb *tp, uint8_t status) 4180 { 4181 uint32_t bit, i; 4182 4183 if ((tp == NULL) || 4184 (status > TCP_EI_STATUS_MAX_VALUE) || 4185 (status == 0)) { 4186 /* Invalid */ 4187 return; 4188 } 4189 if (status > (sizeof(uint32_t) * 8)) { 4190 /* Should this be a KASSERT? */ 4191 return; 4192 } 4193 bit = 1U << (status - 1); 4194 if (bit & tp->t_end_info_status) { 4195 /* already logged */ 4196 return; 4197 } 4198 for (i = 0; i < TCP_END_BYTE_INFO; i++) { 4199 if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) { 4200 tp->t_end_info_bytes[i] = status; 4201 tp->t_end_info_status |= bit; 4202 break; 4203 } 4204 } 4205 } 4206 4207 int 4208 tcp_can_enable_pacing(void) 4209 { 4210 4211 if ((tcp_pacing_limit == -1) || 4212 (tcp_pacing_limit > number_of_tcp_connections_pacing)) { 4213 atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1); 4214 shadow_num_connections = number_of_tcp_connections_pacing; 4215 return (1); 4216 } else { 4217 return (0); 4218 } 4219 } 4220 4221 static uint8_t tcp_pacing_warning = 0; 4222 4223 void 4224 tcp_decrement_paced_conn(void) 4225 { 4226 uint32_t ret; 4227 4228 ret = atomic_fetchadd_int(&number_of_tcp_connections_pacing, -1); 4229 shadow_num_connections = number_of_tcp_connections_pacing; 4230 KASSERT(ret != 0, ("tcp_paced_connection_exits -1 would cause wrap?")); 4231 if (ret == 0) { 4232 if (tcp_pacing_limit != -1) { 4233 printf("Warning all pacing is now disabled, count decrements invalidly!\n"); 4234 tcp_pacing_limit = 0; 4235 } else if (tcp_pacing_warning == 0) { 4236 printf("Warning pacing count is invalid, invalid decrement\n"); 4237 tcp_pacing_warning = 1; 4238 } 4239 } 4240 } 4241