/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/khelp.h>
#endif
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/refcount.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif

#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>
#include <sys/md5.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
#ifdef INET6
VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
#endif

struct rwlock tcp_function_lock;

static int
sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, &VNET_NAME(tcp_mssdflt), 0,
    &sysctl_net_inet_tcp_mss_check, "I",
    "Default TCP Maximum Segment Size");

#ifdef INET6
static int
sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_v6mssdflt;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < TCP_MINMSS)
			error = EINVAL;
		else
			V_tcp_v6mssdflt = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, &VNET_NAME(tcp_v6mssdflt), 0,
    &sysctl_net_inet_tcp_mss_v6_check, "I",
    "Default TCP Maximum Segment Size for IPv6");
#endif /* INET6 */
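/*
 * Illustrative note (added commentary, not in the original source):
 * both handlers above reject values below TCP_MINMSS, so e.g.
 *
 *	# sysctl net.inet.tcp.mssdflt=1460	accepted
 *	# sysctl net.inet.tcp.mssdflt=20	rejected with EINVAL
 *
 * and the new default only affects connections created afterwards.
 */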
/*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one. The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending. Set to zero to disable MINMSS
 * checking. This setting prevents us from sending too small packets.
 */
VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_minmss), 0,
    "Minimum TCP Maximum Segment Size");

VNET_DEFINE(int, tcp_do_rfc1323) = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc1323), 0,
    "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_log_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
    &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");

static int	tcp_tcbhashsize;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");

static VNET_DEFINE(int, icmp_may_rst) = 1;
#define	V_icmp_may_rst			VNET(icmp_may_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(icmp_may_rst), 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static VNET_DEFINE(int, tcp_isn_reseed_interval) = 0;
#define	V_tcp_isn_reseed_interval	VNET(tcp_isn_reseed_interval)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_isn_reseed_interval), 0,
    "Seconds between reseeding of ISN secret");

static int	tcp_soreceive_stream;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
    &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");

VNET_DEFINE(uma_zone_t, sack_hole_zone);
#define	V_sack_hole_zone		VNET(sack_hole_zone)

#ifdef TCP_HHOOK
VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
#endif

static struct inpcb *tcp_notify(struct inpcb *, int);
static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
static void tcp_mtudisc(struct inpcb *, int);
static char *	tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
		    void *ip4hdr, const void *ip6hdr);

static struct tcp_function_block tcp_def_funcblk = {
	"default",
	tcp_output,
	tcp_do_segment,
	tcp_default_ctloutput,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	0,
	0
};

int t_functions_inited = 0;
static int tcp_fb_cnt = 0;
struct tcp_funchead t_functions;
static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk;

static void
init_tcp_functions(void)
{
	if (t_functions_inited == 0) {
		TAILQ_INIT(&t_functions);
		rw_init_flags(&tcp_function_lock, "tcp_func_lock", 0);
		t_functions_inited = 1;
	}
}

static struct tcp_function_block *
find_tcp_functions_locked(struct tcp_function_set *fs)
{
	struct tcp_function *f;
	struct tcp_function_block *blk = NULL;

	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (strcmp(f->tf_name, fs->function_set_name) == 0) {
			blk = f->tf_fb;
			break;
		}
	}
	return (blk);
}

static struct tcp_function_block *
find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
{
	struct tcp_function_block *rblk = NULL;
	struct tcp_function *f;
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		if (f->tf_fb == blk) {
			rblk = blk;
			if (s) {
				*s = f;
			}
			break;
		}
	}
	return (rblk);
}

struct tcp_function_block *
find_and_ref_tcp_functions(struct tcp_function_set *fs)
{
	struct tcp_function_block *blk;

	rw_rlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(fs);
	if (blk)
		refcount_acquire(&blk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (blk);
}

struct tcp_function_block *
find_and_ref_tcp_fb(struct tcp_function_block *blk)
{
	struct tcp_function_block *rblk;

	rw_rlock(&tcp_function_lock);
	rblk = find_tcp_fb_locked(blk, NULL);
	if (rblk)
		refcount_acquire(&rblk->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	return (rblk);
}

static int
sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
{
	int error = ENOENT;
	struct tcp_function_set fs;
	struct tcp_function_block *blk;

	memset(&fs, 0, sizeof(fs));
	rw_rlock(&tcp_function_lock);
	blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL);
	if (blk) {
		/* Found him */
		strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
		fs.pcbcnt = blk->tfb_refcnt;
	}
	rw_runlock(&tcp_function_lock);
	error = sysctl_handle_string(oidp, fs.function_set_name,
	    sizeof(fs.function_set_name), req);

	/* Check for error or no change */
	if (error != 0 || req->newptr == NULL)
		return (error);

	rw_wlock(&tcp_function_lock);
	blk = find_tcp_functions_locked(&fs);
	if ((blk == NULL) ||
	    (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
		error = ENOENT;
		goto done;
	}
	tcp_func_set_ptr = blk;
done:
	rw_wunlock(&tcp_function_lock);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
    CTLTYPE_STRING | CTLFLAG_RW,
    NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
    "Set/get the default TCP functions");

static int
sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
{
	int error, cnt, linesz;
	struct tcp_function *f;
	char *buffer, *cp;
	size_t bufsz, outsz;
	bool alias;

	cnt = 0;
	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		cnt++;
	}
	rw_runlock(&tcp_function_lock);

	bufsz = (cnt + 2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
	buffer = malloc(bufsz, M_TEMP, M_WAITOK);

	error = 0;
	cp = buffer;

	linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
	    "Alias", "PCB count");
	cp += linesz;
	bufsz -= linesz;
	outsz = linesz;

	rw_rlock(&tcp_function_lock);
	TAILQ_FOREACH(f, &t_functions, tf_next) {
		alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
		linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
		    f->tf_fb->tfb_tcp_block_name,
		    (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ',
		    alias ? f->tf_name : "-",
		    f->tf_fb->tfb_refcnt);
		if (linesz >= bufsz) {
			error = EOVERFLOW;
			break;
		}
		cp += linesz;
		bufsz -= linesz;
		outsz += linesz;
	}
	rw_runlock(&tcp_function_lock);
	if (error == 0)
		error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
    CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_net_inet_list_available, "A",
    "list available TCP Function sets");

/*
 * Exports one (struct tcp_function_info) for each alias/name.
 */
static int
sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
{
	int cnt, error;
	struct tcp_function *f;
	struct tcp_function_info tfi;

	/*
	 * We don't allow writes.
	 */
	if (req->newptr != NULL)
		return (EINVAL);

	/*
	 * Wire the old buffer so we can directly copy the functions to
	 * user space without dropping the lock.
	 */
	if (req->oldptr != NULL) {
		error = sysctl_wire_old_buffer(req, 0);
		if (error)
			return (error);
	}

	/*
	 * Walk the list and copy out matching entries. If INVARIANTS
	 * is compiled in, also walk the list to verify the length of
	 * the list matches what we have recorded.
	 */
	rw_rlock(&tcp_function_lock);
#ifdef INVARIANTS
	cnt = 0;
#else
	if (req->oldptr == NULL) {
		cnt = tcp_fb_cnt;
		goto skip_loop;
	}
#endif
	TAILQ_FOREACH(f, &t_functions, tf_next) {
#ifdef INVARIANTS
		cnt++;
#endif
		if (req->oldptr != NULL) {
			tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
			tfi.tfi_id = f->tf_fb->tfb_id;
			(void)strncpy(tfi.tfi_alias, f->tf_name,
			    TCP_FUNCTION_NAME_LEN_MAX);
			tfi.tfi_alias[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
			(void)strncpy(tfi.tfi_name,
			    f->tf_fb->tfb_tcp_block_name,
			    TCP_FUNCTION_NAME_LEN_MAX);
			tfi.tfi_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
			error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
			/*
			 * Don't stop on error, as that is the
			 * mechanism we use to accumulate length
			 * information if the buffer was too short.
			 */
		}
	}
	KASSERT(cnt == tcp_fb_cnt,
	    ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
#ifndef INVARIANTS
skip_loop:
#endif
	rw_runlock(&tcp_function_lock);
	if (req->oldptr == NULL)
		error = SYSCTL_OUT(req, NULL,
		    (cnt + 1) * sizeof(struct tcp_function_info));

	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
    CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
    "List TCP function block name-to-ID mappings");

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	0
#endif

/*
 * XXX
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct tcpcb_mem {
	struct tcpcb		tcb;
	struct tcp_timer	tt;
	struct cc_var		ccv;
#ifdef TCP_HHOOK
	struct osd		osd;
#endif
};

static VNET_DEFINE(uma_zone_t, tcpcb_zone);
#define	V_tcpcb_zone			VNET(tcpcb_zone)

MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");

static struct mtx isn_mtx;

#define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
#define	ISN_LOCK()	mtx_lock(&isn_mtx)
#define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)

/*
 * TCP initialization.
 */
static void
tcp_zone_change(void *tag)
{

	uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
	uma_zone_set_max(V_tcpcb_zone, maxsockets);
	tcp_tw_zone_change();
}

static int
tcp_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "tcpinp");
	return (0);
}

/*
 * Take a value and get the next power of 2 that doesn't overflow.
 * Used to size the tcp_inpcb hash buckets.
 */
static int
maketcp_hashsize(int size)
{
	int hashsize;

	/*
	 * auto tune.
	 * get the next power of 2 higher than maxsockets.
	 */
	hashsize = 1 << fls(size);
	/* catch overflow, and just go one power of 2 smaller */
	if (hashsize < size) {
		hashsize = 1 << (fls(size) - 1);
	}
	return (hashsize);
}
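/*
 * Illustrative arithmetic (added commentary): maketcp_hashsize(600)
 * returns 1 << fls(600) == 1 << 10 == 1024.  If size is so large that
 * 1 << fls(size) overflows and compares smaller than size, the result
 * drops back one power of 2 to 1 << (fls(size) - 1).
 */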
static volatile int next_tcp_stack_id = 1;

/*
 * Register a TCP function block with the name provided in the names
 * array.  (Note that this function does NOT automatically register
 * blk->tfb_tcp_block_name as a stack name.  Therefore, you should
 * explicitly include blk->tfb_tcp_block_name in the list of names if
 * you wish to register the stack with that name.)
 *
 * Either all name registrations will succeed or all will fail.  If
 * a name registration fails, the function will update the num_names
 * argument to point to the array index of the name that encountered
 * the failure.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
    const char *names[], int *num_names)
{
	struct tcp_function *n;
	struct tcp_function_set fs;
	int error, i;

	KASSERT(names != NULL && *num_names > 0,
	    ("%s: Called with 0-length name list", __func__));
	KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));

	if (t_functions_inited == 0) {
		init_tcp_functions();
	}
	if ((blk->tfb_tcp_output == NULL) ||
	    (blk->tfb_tcp_do_segment == NULL) ||
	    (blk->tfb_tcp_ctloutput == NULL) ||
	    (strlen(blk->tfb_tcp_block_name) == 0)) {
		/*
		 * These functions are required and you
		 * need a name.
		 */
		*num_names = 0;
		return (EINVAL);
	}
	if (blk->tfb_tcp_timer_stop_all ||
	    blk->tfb_tcp_timer_activate ||
	    blk->tfb_tcp_timer_active ||
	    blk->tfb_tcp_timer_stop) {
		/*
		 * If you define one timer function you
		 * must have them all.
		 */
		if ((blk->tfb_tcp_timer_stop_all == NULL) ||
		    (blk->tfb_tcp_timer_activate == NULL) ||
		    (blk->tfb_tcp_timer_active == NULL) ||
		    (blk->tfb_tcp_timer_stop == NULL)) {
			*num_names = 0;
			return (EINVAL);
		}
	}

	refcount_init(&blk->tfb_refcnt, 0);
	blk->tfb_flags = 0;
	blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
	for (i = 0; i < *num_names; i++) {
		n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
		if (n == NULL) {
			error = ENOMEM;
			goto cleanup;
		}
		n->tf_fb = blk;

		(void)strncpy(fs.function_set_name, names[i],
		    TCP_FUNCTION_NAME_LEN_MAX);
		fs.function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
		rw_wlock(&tcp_function_lock);
		if (find_tcp_functions_locked(&fs) != NULL) {
			/* Duplicate name space not allowed */
			rw_wunlock(&tcp_function_lock);
			free(n, M_TCPFUNCTIONS);
			error = EALREADY;
			goto cleanup;
		}
		(void)strncpy(n->tf_name, names[i], TCP_FUNCTION_NAME_LEN_MAX);
		n->tf_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
		TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
		tcp_fb_cnt++;
		rw_wunlock(&tcp_function_lock);
	}
	return (0);

cleanup:
	/*
	 * Deregister the names we just added. Because registration failed
	 * for names[i], we don't need to deregister that name.
	 */
	*num_names = i;
	rw_wlock(&tcp_function_lock);
	while (--i >= 0) {
		TAILQ_FOREACH(n, &t_functions, tf_next) {
			if (!strncmp(n->tf_name, names[i],
			    TCP_FUNCTION_NAME_LEN_MAX)) {
				TAILQ_REMOVE(&t_functions, n, tf_next);
				tcp_fb_cnt--;
				n->tf_fb = NULL;
				free(n, M_TCPFUNCTIONS);
				break;
			}
		}
	}
	rw_wunlock(&tcp_function_lock);
	return (error);
}
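/*
 * Illustrative usage sketch (added commentary; "example_funcblk" is a
 * hypothetical module's function block, not a symbol in this file):
 *
 *	static const char *names[] = { "example", "example_alias" };
 *	int num_names = 2;
 *	int error;
 *
 *	error = register_tcp_functions_as_names(&example_funcblk,
 *	    M_WAITOK, names, &num_names);
 *	if (error != 0)
 *		printf("registration failed at names[%d]: %d\n",
 *		    num_names, error);
 */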
/*
 * Register a TCP function block using the name provided in the name
 * argument.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
    int wait)
{
	const char *name_list[1];
	int num_names, rv;

	num_names = 1;
	if (name != NULL)
		name_list[0] = name;
	else
		name_list[0] = blk->tfb_tcp_block_name;
	rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
	return (rv);
}

/*
 * Register a TCP function block using the name defined in
 * blk->tfb_tcp_block_name.
 *
 * Returns 0 on success, or an error code on failure.
 */
int
register_tcp_functions(struct tcp_function_block *blk, int wait)
{

	return (register_tcp_functions_as_name(blk, NULL, wait));
}

int
deregister_tcp_functions(struct tcp_function_block *blk)
{
	struct tcp_function *f;
	int error = ENOENT;

	if (strcmp(blk->tfb_tcp_block_name, "default") == 0) {
		/* You can't un-register the default */
		return (EPERM);
	}
	rw_wlock(&tcp_function_lock);
	if (blk == tcp_func_set_ptr) {
		/* You can't free the current default */
		rw_wunlock(&tcp_function_lock);
		return (EBUSY);
	}
	if (blk->tfb_refcnt) {
		/* Still tcb attached, mark it. */
		blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
		rw_wunlock(&tcp_function_lock);
		return (EBUSY);
	}
	while (find_tcp_fb_locked(blk, &f) != NULL) {
		/* Found */
		TAILQ_REMOVE(&t_functions, f, tf_next);
		tcp_fb_cnt--;
		f->tf_fb = NULL;
		free(f, M_TCPFUNCTIONS);
		error = 0;
	}
	rw_wunlock(&tcp_function_lock);
	return (error);
}

void
tcp_init(void)
{
	const char *tcbhash_tuneable;
	int hashsize;

	tcbhash_tuneable = "net.inet.tcp.tcbhashsize";

#ifdef TCP_HHOOK
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
	    &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
	    &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register helper hook\n", __func__);
#endif
	hashsize = TCBHASHSIZE;
	TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
	if (hashsize == 0) {
		/*
		 * Auto tune the hash size based on maxsockets.
		 * A perfect hash would have a 1:1 mapping
		 * (hashsize = maxsockets); however, it's been
		 * suggested that O(2) average is better.
		 */
		hashsize = maketcp_hashsize(maxsockets / 4);
		/*
		 * Our historical default is 512,
		 * do not autotune lower than this.
		 */
		if (hashsize < 512)
			hashsize = 512;
		if (bootverbose && IS_DEFAULT_VNET(curvnet))
			printf("%s: %s auto tuned to %d\n", __func__,
			    tcbhash_tuneable, hashsize);
	}
	/*
	 * We require a hashsize to be a power of two.
	 * Previously if it was not a power of two we would just reset it
	 * back to 512, which could be a nasty surprise if you did not notice
	 * the error message.
	 * Instead what we do is clip it to the closest power of two lower
	 * than the specified hash value.
	 */
	if (!powerof2(hashsize)) {
		int oldhashsize = hashsize;

		hashsize = maketcp_hashsize(hashsize);
		/* prevent absurdly low value */
		if (hashsize < 16)
			hashsize = 16;
		printf("%s: WARNING: TCB hash size not a power of 2, "
		    "clipped from %d to %d.\n", __func__, oldhashsize,
		    hashsize);
	}
	in_pcbinfo_init(&V_tcbinfo, "tcp", &V_tcb, hashsize, hashsize,
	    "tcp_inpcb", tcp_inpcb_init, IPI_HASHFIELDS_4TUPLE);

	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcpcb_zone, maxsockets);
	uma_zone_set_warning(V_tcpcb_zone, "kern.ipc.maxsockets limit reached");

	tcp_tw_init();
	syncache_init();
	tcp_hc_init();

	TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
	V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	tcp_fastopen_init();

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	tcp_reass_global_init();

	/* XXX virtualize those below? */
	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	if (tcp_rexmit_min < 1)
		tcp_rexmit_min = 1;
	tcp_persmin = TCPTV_PERSMIN;
	tcp_persmax = TCPTV_PERSMAX;
	tcp_rexmit_slop = TCPTV_CPU_VAR;
	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
	tcp_tcbhashsize = hashsize;
	/* Setup the tcp function block list */
	init_tcp_functions();
	register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
#ifdef TCP_BLACKBOX
	/* Initialize the TCP logging data. */
	tcp_log_init();
#endif

	if (tcp_soreceive_stream) {
#ifdef INET
		tcp_usrreqs.pru_soreceive = soreceive_stream;
#endif
#ifdef INET6
		tcp6_usrreqs.pru_soreceive = soreceive_stream;
#endif /* INET6 */
	}

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	ISN_LOCK_INIT();
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
#ifdef TCPPCAP
	tcp_pcap_init();
#endif
}

#ifdef VIMAGE
static void
tcp_destroy(void *unused __unused)
{
	int n;
#ifdef TCP_HHOOK
	int error;
#endif

	/*
	 * All our processes are gone, all our sockets should be cleaned
	 * up, which means, we should be past the tcp_discardcb() calls.
	 * Sleep to let all tcpcb timers really disappear and clean up.
	 */
	for (;;) {
		INP_LIST_RLOCK(&V_tcbinfo);
		n = V_tcbinfo.ipi_count;
		INP_LIST_RUNLOCK(&V_tcbinfo);
		if (n == 0)
			break;
		pause("tcpdes", hz / 10);
	}
	tcp_hc_destroy();
	syncache_destroy();
	tcp_tw_destroy();
	in_pcbinfo_destroy(&V_tcbinfo);
	/* tcp_discardcb() clears the sack_holes up. */
	uma_zdestroy(V_sack_hole_zone);
	uma_zdestroy(V_tcpcb_zone);

	/*
	 * Cannot free the zone until all tcpcbs are released as we attach
	 * the allocations to them.
	 */
	tcp_fastopen_destroy();

#ifdef TCP_HHOOK
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
	}
	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister helper hook "
		    "type=%d, id=%d: error %d returned\n", __func__,
		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
	}
#endif
}
VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
#endif

void
tcp_fini(void *xtp)
{

}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

	INP_WLOCK_ASSERT(inp);

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->inp_flow & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
#endif /* INET */
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(struct inpcb *inp)
{
	struct tcptemp *t;

	t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
	if (t == NULL)
		return (NULL);
	tcpip_fillheaders(inp, (void *)&t->tt_ipgen, (void *)&t->tt_t);
	return (t);
}
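/*
 * Illustrative sketch (added commentary, assuming the keepalive path in
 * tcp_timer_keep() looks roughly like this): a probe is built from the
 * template, sent via tcp_respond(), and the template freed afterwards:
 *
 *	t = tcpip_maketemplate(inp);
 *	if (t != NULL) {
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		free(t, M_TEMP);
 *	}
 */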
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at th and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment th,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then th must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
    tcp_seq ack, tcp_seq seq, int flags)
{
	struct tcpopt to;
	struct inpcb *inp;
	struct ip *ip;
	struct mbuf *optm;
	struct tcphdr *nth;
	u_char *optp;
#ifdef INET6
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int optlen, tlen, win;
	bool incl_opts;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp != NULL) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_WLOCK_ASSERT(inp);
	} else
		inp = NULL;

	incl_opts = false;
	win = 0;
	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > TCP_MAXWIN << tp->rcv_scale)
				win = TCP_MAXWIN << tp->rcv_scale;
		}
		if ((tp->t_flags & TF_NOOPT) == 0)
			incl_opts = true;
	}
	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL)
			return;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else if (!M_WRITABLE(m)) {
		struct mbuf *n;

		/* Can't reuse 'm', allocate a new mbuf. */
		n = m_gethdr(M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return;
		}

		if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
			m_freem(m);
			m_freem(n);
			return;
		}

		n->m_data += max_linkhdr;
		/* m_len is set later */
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(n, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(n, struct ip6_hdr *);
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip));
			ip = mtod(n, struct ip *);
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		xchg(nth->th_dport, nth->th_sport, uint16_t);
		th = nth;
		m_freem(m);
		m = n;
	} else {
		/*
		 * reuse the mbuf.
		 * XXX MRT We inherit the FIB, which is lucky.
		 */
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * This usually happens when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, uint16_t);
#undef xchg
	}
	tlen = 0;
#ifdef INET6
	if (isipv6)
		tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
		tlen = sizeof (struct tcpiphdr);
#endif
#ifdef INVARIANTS
	m->m_len = 0;
	KASSERT(M_TRAILINGSPACE(m) >= tlen,
	    ("Not enough trailing space for message (m=%p, need=%d, have=%ld)",
	    m, tlen, (long)M_TRAILINGSPACE(m)));
#endif
	m->m_len = tlen;
	to.to_flags = 0;
	if (incl_opts) {
		/* Make sure we have room. */
		if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) {
			m->m_next = m_get(M_NOWAIT, MT_DATA);
			if (m->m_next) {
				optp = mtod(m->m_next, u_char *);
				optm = m->m_next;
			} else
				incl_opts = false;
		} else {
			optp = (u_char *) (nth + 1);
			optm = m;
		}
	}
	if (incl_opts) {
		/* Timestamps. */
		if (tp->t_flags & TF_RCVD_TSTMP) {
			to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif
		/* Add the options. */
		tlen += optlen = tcp_addoptions(&to, optp);

		/* Update m_len in the correct mbuf. */
		optm->m_len += optlen;
	} else
		optlen = 0;
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(tlen - sizeof(*ip6));
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(tlen);
		ip->ip_ttl = V_ip_defttl;
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
	}
#endif
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
#ifdef MAC
	if (inp != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		INP_WLOCK_ASSERT(inp);
		mac_inpcb_create_mbuf(inp, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_netinet_tcp_reply(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) {
			m_freem(m);
			return;
		}
	}
#endif

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		nth->th_sum = in6_cksum_pseudo(ip6,
		    tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
		    NULL, NULL);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
	}
#endif /* INET */
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	TCP_PROBE3(debug__output, tp, th, m);
	if (flags & TH_RST)
		TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth);

#ifdef INET6
	if (isipv6) {
		TCP_PROBE5(send, NULL, tp, ip6, tp, nth);
		(void)ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		TCP_PROBE5(send, NULL, tp, ip, tp, nth);
		(void)ip_output(m, NULL, NULL, 0, NULL, inp);
	}
#endif
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;

	/* Initialise cc_var struct for this tcpcb. */
	tp->ccv = &tm->ccv;
	tp->ccv->type = IPPROTO_TCP;
	tp->ccv->ccvc.tcp = tp;
	rw_rlock(&tcp_function_lock);
	tp->t_fb = tcp_func_set_ptr;
	refcount_acquire(&tp->t_fb->tfb_refcnt);
	rw_runlock(&tcp_function_lock);
	/*
	 * Use the current system default CC algorithm.
	 */
	CC_LIST_RLOCK();
	KASSERT(!STAILQ_EMPTY(&cc_list), ("cc_list is empty!"));
	CC_ALGO(tp) = CC_DEFAULT();
	CC_LIST_RUNLOCK();

	if (CC_ALGO(tp)->cb_init != NULL)
		if (CC_ALGO(tp)->cb_init(tp->ccv) > 0) {
			if (tp->t_fb->tfb_tcp_fb_fini)
				(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
			refcount_release(&tp->t_fb->tfb_refcnt);
			uma_zfree(V_tcpcb_zone, tm);
			return (NULL);
		}

#ifdef TCP_HHOOK
	tp->osd = &tm->osd;
	if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
		refcount_release(&tp->t_fb->tfb_refcnt);
		uma_zfree(V_tcpcb_zone, tm);
		return (NULL);
	}
#endif

#ifdef VIMAGE
	tp->t_vnet = inp->inp_vnet;
#endif
	tp->t_timers = &tm->tt;
	/*	LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg =
#ifdef INET6
		isipv6 ? V_tcp_v6mssdflt :
#endif /* INET6 */
		V_tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(&tp->t_timers->tt_rexmt, 1);
	callout_init(&tp->t_timers->tt_persist, 1);
	callout_init(&tp->t_timers->tt_keep, 1);
	callout_init(&tp->t_timers->tt_2msl, 1);
	callout_init(&tp->t_timers->tt_delack, 1);

	if (V_tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (V_tcp_do_sack)
		tp->t_flags |= TF_SACK_PERMIT;
	TAILQ_INIT(&tp->snd_holes);
	/*
	 * The tcpcb will hold a reference on its inpcb until tcp_discardcb()
	 * is called.
	 */
	in_pcbref(inp);	/* Reference for tcpcb */
	tp->t_inpcb = inp;

	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
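	/*
	 * Illustrative arithmetic (added commentary): with
	 * TCPTV_SRTTBASE == 0, the unscaled rttvar above equals
	 * (TCPTV_RTOBASE - TCPTV_SRTTBASE) / 4, so srtt + 4 * rttvar ==
	 * TCPTV_RTOBASE, which matches the t_rxtcur just set.
	 */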
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = V_ip_defttl;
	inp->inp_ppcb = tp;
#ifdef TCPPCAP
	/*
	 * Init the TCP PCAP queues.
	 */
	tcp_pcap_tcpcb_init(tp);
#endif
#ifdef TCP_BLACKBOX
	/* Initialize the per-TCPCB log data. */
	tcp_log_tcpcbinit(tp);
#endif
	if (tp->t_fb->tfb_tcp_fb_init) {
		(*tp->t_fb->tfb_tcp_fb_init)(tp);
	}
	return (tp);		/* XXX */
}

/*
 * Switch the congestion control algorithm back to NewReno for any active
 * control blocks using an algorithm which is about to go away.
 * This ensures the CC framework can allow the unload to proceed without leaving
 * any dangling pointers which would trigger a panic.
 * Returning non-zero would inform the CC framework that something went wrong
 * and it would be unsafe to allow the unload to proceed.  However, there is no
 * way for this to occur with this implementation so we always return zero.
 */
int
tcp_ccalgounload(struct cc_algo *unload_algo)
{
	struct cc_algo *tmpalgo;
	struct inpcb *inp;
	struct tcpcb *tp;
	VNET_ITERATOR_DECL(vnet_iter);

	/*
	 * Check all active control blocks across all network stacks and change
	 * any that are using "unload_algo" back to NewReno.  If "unload_algo"
	 * requires cleanup code to be run, call it.
	 */
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INP_INFO_WLOCK(&V_tcbinfo);
		/*
		 * New connections already part way through being initialised
		 * with the CC algo we're removing will not race with this code
		 * because the INP_INFO_WLOCK is held during initialisation.  We
		 * therefore don't enter the loop below until the connection
		 * list has stabilised.
		 */
		LIST_FOREACH(inp, &V_tcb, inp_list) {
			INP_WLOCK(inp);
			/* Important to skip tcptw structs. */
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    (tp = intotcpcb(inp)) != NULL) {
				/*
				 * By holding INP_WLOCK here, we are assured
				 * that the connection is not currently
				 * executing inside the CC module's functions
				 * i.e. it is safe to make the switch back to
				 * NewReno.
				 */
				if (CC_ALGO(tp) == unload_algo) {
					tmpalgo = CC_ALGO(tp);
					/* NewReno does not require any init. */
					CC_ALGO(tp) = &newreno_cc_algo;
					if (tmpalgo->cb_destroy != NULL)
						tmpalgo->cb_destroy(tp->ccv);
				}
			}
			INP_WUNLOCK(inp);
		}
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (0);
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int errno)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tcp_state_change(tp, TCPS_CLOSED);
		(void) tp->t_fb->tfb_tcp_output(tp);
		TCPSTAT_INC(tcps_drops);
	} else
		TCPSTAT_INC(tcps_conndrops);
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

void
tcp_discardcb(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	int released;

	INP_WLOCK_ASSERT(inp);

	/*
	 * Make sure that all of our timers are stopped before we delete the
	 * PCB.
	 *
	 * If stopping a timer fails, we schedule a discard function in same
	 * callout, and the last discard function called will take care of
	 * deleting the tcpcb.
	 */
	tp->t_timers->tt_draincnt = 0;
	tcp_timer_stop(tp, TT_REXMT);
	tcp_timer_stop(tp, TT_PERSIST);
	tcp_timer_stop(tp, TT_KEEP);
	tcp_timer_stop(tp, TT_2MSL);
	tcp_timer_stop(tp, TT_DELACK);
	if (tp->t_fb->tfb_tcp_timer_stop_all) {
		/*
		 * Call the stop-all function of the methods,
		 * this function should call the tcp_timer_stop()
		 * method with each of the function specific timeouts.
		 * That stop will be called via the tfb_tcp_timer_stop()
		 * which should use the async drain function of the
		 * callout system (see tcp_var.h).
		 */
		tp->t_fb->tfb_tcp_timer_stop_all(tp);
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 4 rtt samples.
	 * 4 samples is enough for the srtt filter to converge
	 * to within enough % of the correct value; fewer samples
	 * and we could save a bogus rtt. The danger is not high
	 * as tcp quickly recovers from everything.
	 * XXX: Works very well but needs some more statistics!
	 */
	if (tp->t_rttupdated >= 4) {
		struct hc_metrics_lite metrics;
		uint32_t ssthresh;

		bzero(&metrics, sizeof(metrics));
		/*
		 * Update the ssthresh always when the conditions below
		 * are satisfied. This gives us better new start value
		 * for the congestion avoidance for new connections.
		 * ssthresh is only set if packet loss occurred on a session.
		 *
		 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
		 * being torn down.  Ideally this code would not use 'so'.
		 */
		ssthresh = tp->snd_ssthresh;
		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
			if (ssthresh < 2)
				ssthresh = 2;
			ssthresh *= (tp->t_maxseg +
#ifdef INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
			    sizeof (struct tcphdr) :
#endif
				sizeof (struct tcpiphdr)
#ifdef INET6
			    )
#endif
			    );
		} else
			ssthresh = 0;
		metrics.rmx_ssthresh = ssthresh;

		metrics.rmx_rtt = tp->t_srtt;
		metrics.rmx_rttvar = tp->t_rttvar;
		metrics.rmx_cwnd = tp->snd_cwnd;
		metrics.rmx_sendpipe = 0;
		metrics.rmx_recvpipe = 0;

		tcp_hc_update(&inp->inp_inc, &metrics);
	}

	/* free the reassembly queue, if any */
	tcp_reass_flush(tp);

#ifdef TCP_OFFLOAD
	/* Disconnect offload device, if any. */
	if (tp->t_flags & TF_TOE)
		tcp_offload_detach(tp);
#endif

	tcp_free_sackholes(tp);

#ifdef TCPPCAP
	/* Free the TCP PCAP queues. */
	tcp_pcap_drain(&(tp->t_inpkts));
	tcp_pcap_drain(&(tp->t_outpkts));
#endif

	/* Allow the CC algorithm to clean up after itself. */
	if (CC_ALGO(tp)->cb_destroy != NULL)
		CC_ALGO(tp)->cb_destroy(tp->ccv);

#ifdef TCP_HHOOK
	khelp_destroy_osd(tp->osd);
#endif

	CC_ALGO(tp) = NULL;
	inp->inp_ppcb = NULL;
	if (tp->t_timers->tt_draincnt == 0) {
		/* We own the last reference on tcpcb, let's free it. */
#ifdef TCP_BLACKBOX
		tcp_log_tcpcbfini(tp);
#endif
		TCPSTATES_DEC(tp->t_state);
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
		refcount_release(&tp->t_fb->tfb_refcnt);
		tp->t_inpcb = NULL;
		uma_zfree(V_tcpcb_zone, tp);
		released = in_pcbrele_wlocked(inp);
		KASSERT(!released, ("%s: inp %p should not have been released "
		    "here", __func__, inp));
	}
}

void
tcp_timer_discard(void *ptp)
{
	struct inpcb *inp;
	struct tcpcb *tp;

	tp = (struct tcpcb *)ptp;
	CURVNET_SET(tp->t_vnet);
	INP_INFO_RLOCK(&V_tcbinfo);
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL",
	    __func__, tp));
	INP_WLOCK(inp);
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) != 0,
	    ("%s: tcpcb has to be stopped here", __func__));
	tp->t_timers->tt_draincnt--;
	if (tp->t_timers->tt_draincnt == 0) {
		/* We own the last reference on this tcpcb, let's free it. */
#ifdef TCP_BLACKBOX
		tcp_log_tcpcbfini(tp);
#endif
		TCPSTATES_DEC(tp->t_state);
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
		refcount_release(&tp->t_fb->tfb_refcnt);
		tp->t_inpcb = NULL;
		uma_zfree(V_tcpcb_zone, tp);
		if (in_pcbrele_wlocked(inp)) {
			INP_INFO_RUNLOCK(&V_tcbinfo);
			CURVNET_RESTORE();
			return;
		}
	}
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
}

/*
 * Attempt to close a TCP control block, marking it as dropped, and freeing
 * the socket if we hold the only reference.
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;

	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_state == TCPS_LISTEN)
		tcp_offload_listen_stop(tp);
#endif
	/*
	 * This releases the TFO pending counter resource for TFO listen
	 * sockets as well as passively-created TFO sockets that transition
	 * from SYN_RECEIVED to CLOSED.
	 */
	if (tp->t_tfo_pending) {
		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
		tp->t_tfo_pending = NULL;
	}
	in_pcbdrop(inp);
	TCPSTAT_INC(tcps_closed);
	if (tp->t_state != TCPS_CLOSED)
		tcp_state_change(tp, TCPS_CLOSED);
	KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
	so = inp->inp_socket;
	soisdisconnected(so);
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_close: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
		return (NULL);
	}
	return (tp);
}

void
tcp_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	if (!do_tcpdrain)
		return;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		struct inpcb *inpb;
		struct tcpcb *tcpb;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		INP_INFO_WLOCK(&V_tcbinfo);
		LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
			if (inpb->inp_flags & INP_TIMEWAIT)
				continue;
			INP_WLOCK(inpb);
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				tcp_reass_flush(tcpb);
				tcp_clean_sackreport(tcpb);
#ifdef TCP_BLACKBOX
				tcp_log_drain(tcpb);
#endif
#ifdef TCPPCAP
				if (tcp_pcap_aggressive_free) {
					/* Free the TCP PCAP queues. */
					tcp_pcap_drain(&(tcpb->t_inpkts));
					tcp_pcap_drain(&(tcpb->t_outpkts));
				}
#endif
			}
			INP_WUNLOCK(inpb);
		}
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp;

	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if ((inp->inp_flags & INP_TIMEWAIT) ||
	    (inp->inp_flags & INP_DROPPED))
		return (inp);

	tp = intotcpcb(inp);
	KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		if (inp->inp_route.ro_rt) {
			RTFREE(inp->inp_route.ro_rt);
			inp->inp_route.ro_rt = (struct rtentry *)NULL;
		}
		return (inp);
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tp = tcp_drop(tp, error);
		if (tp != NULL)
			return (inp);
		else
			return (NULL);
	} else {
		tp->t_softerror = error;
		return (inp);
	}
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, m, n, pcb_count;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = V_tcbinfo.ipi_count +
		    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_LIST_RLOCK(&V_tcbinfo);
	gencnt = V_tcbinfo.ipi_gencnt;
	n = V_tcbinfo.ipi_count;
	INP_LIST_RUNLOCK(&V_tcbinfo);

	m = counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ (n + m) * sizeof(struct xtcpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n + m;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	error = syncache_pcblist(req, m, &pcb_count);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);

	INP_INFO_WLOCK(&V_tcbinfo);
	for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
	    inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_flags & INP_TIMEWAIT) {
				if (intotw(inp) != NULL)
					error = cr_cansee(req->td->td_ucred,
					    intotw(inp)->tw_cred);
				else
					error = EINVAL;	/* Skip this inp. */
			} else
				error = cr_canseeinpcb(req->td->td_ucred, inp);
			if (error == 0) {
				in_pcbref(inp);
				inp_list[i++] = inp;
			}
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_tcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;

			tcp_inptoxtp(inp, &xt);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_RLOCK(&V_tcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_tcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, m, n, pcb_count;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = V_tcbinfo.ipi_count +
		    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_LIST_RLOCK(&V_tcbinfo);
	gencnt = V_tcbinfo.ipi_gencnt;
	n = V_tcbinfo.ipi_count;
	INP_LIST_RUNLOCK(&V_tcbinfo);

	m = counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
	    + (n + m) * sizeof(struct xtcpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n + m;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	error = syncache_pcblist(req, m, &pcb_count);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);

	INP_INFO_WLOCK(&V_tcbinfo);
	for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
	    inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_flags & INP_TIMEWAIT) {
				if (intotw(inp) != NULL)
					error = cr_cansee(req->td->td_ucred,
					    intotw(inp)->tw_cred);
				else
					error = EINVAL;	/* Skip this inp. */
			} else
				error = cr_canseeinpcb(req->td->td_ucred, inp);
			if (error == 0) {
				in_pcbref(inp);
				inp_list[i++] = inp;
			}
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_tcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;

			tcp_inptoxtp(inp, &xt);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_RLOCK(&V_tcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_tcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		INP_LIST_RLOCK(&V_tcbinfo);
		xig.xig_gen = V_tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_tcbinfo.ipi_count + pcb_count;
		INP_LIST_RUNLOCK(&V_tcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
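/*
 * Output format of net.inet.tcp.pcblist, for reference: an xinpgen
 * header, one struct xtcpcb per connection (including syncache
 * entries), and a trailing xinpgen.  A consumer such as netstat(1)
 * compares the two generation counts to detect that the set changed
 * while it was being copied out, and retries.
 */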
#ifdef INET
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
	if (inp != NULL) {
		if (inp->inp_socket == NULL)
			error = ENOENT;
		if (error == 0)
			error = cr_canseeinpcb(req->td->td_ucred, inp);
		if (error == 0)
			cru2x(inp->inp_cred, &xuc);
		INP_RUNLOCK(inp);
	} else
		error = ENOENT;
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
#endif /* INET */

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
#ifdef INET
	int mapped = 0;
#endif

	error = priv_check(req->td, PRIV_NETINET_GETCRED);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
	    (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
		return (error);
	}
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
#ifdef INET
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
#endif
			return (EINVAL);
	}

#ifdef INET
	if (mapped == 1)
		inp = in_pcblookup(&V_tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
	else
#endif
		inp = in6_pcblookup(&V_tcbinfo,
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    INPLOOKUP_RLOCKPCB, NULL);
	if (inp != NULL) {
		if (inp->inp_socket == NULL)
			error = ENOENT;
		if (error == 0)
			error = cr_canseeinpcb(req->td->td_ucred, inp);
		if (error == 0)
			cru2x(inp->inp_cred, &xuc);
		INP_RUNLOCK(inp);
	} else
		error = ENOENT;
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif /* INET6 */

#ifdef INET
void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct icmp *icp;
	struct in_conninfo inc;
	tcp_seq icmp_tcp_seq;
	int mtu;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc_notify;
	else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
	    cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;

	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;

	if (ip == NULL) {
		in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
		return;
	}

	icp = (struct icmp *)((caddr_t)ip - offsetof(struct icmp, icmp_ip));
	th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
	INP_INFO_RLOCK(&V_tcbinfo);
	inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport, ip->ip_src,
	    th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
	if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
		/* Signal EHOSTDOWN, as it flushes the cached route. */
		inp = (*notify)(inp, EHOSTDOWN);
		goto out;
	}
	icmp_tcp_seq = th->th_seq;
	if (inp != NULL) {
		if (!(inp->inp_flags & INP_TIMEWAIT) &&
		    !(inp->inp_flags & INP_DROPPED) &&
		    !(inp->inp_socket == NULL)) {
			tp = intotcpcb(inp);
			if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
			    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
				if (cmd == PRC_MSGSIZE) {
					/*
					 * MTU discovery:
					 * If we got a needfrag set the MTU
					 * in the route to the suggested new
					 * value (if given) and then notify.
					 */
					mtu = ntohs(icp->icmp_nextmtu);
					/*
					 * If no alternative MTU was
					 * proposed, try the next smaller
					 * one.
					 */
					if (!mtu)
						mtu = ip_next_mtu(
						    ntohs(ip->ip_len), 1);
					if (mtu < V_tcp_minmss +
					    sizeof(struct tcpiphdr))
						mtu = V_tcp_minmss +
						    sizeof(struct tcpiphdr);
					/*
					 * Only process the offered MTU if it
					 * is smaller than the current one.
					 */
					if (mtu < tp->t_maxseg +
					    sizeof(struct tcpiphdr)) {
						bzero(&inc, sizeof(inc));
						inc.inc_faddr = faddr;
						inc.inc_fibnum =
						    inp->inp_inc.inc_fibnum;
						tcp_hc_updatemtu(&inc, mtu);
						tcp_mtudisc(inp, mtu);
					}
				} else
					inp = (*notify)(inp,
					    inetctlerrmap[cmd]);
			}
		}
	} else {
		bzero(&inc, sizeof(inc));
		inc.inc_fport = th->th_dport;
		inc.inc_lport = th->th_sport;
		inc.inc_faddr = faddr;
		inc.inc_laddr = ip->ip_src;
		syncache_unreach(&inc, icmp_tcp_seq);
	}
out:
	if (inp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
}
#endif /* INET */
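/*
 * Worked example of the PMTUD clamping above (IPv4, default tunables):
 * a needfrag advertising a 1400-byte next-hop MTU gives mtu = 1400;
 * the lower bound is V_tcp_minmss + sizeof(struct tcpiphdr), i.e.
 * 216 + 40 = 256 bytes by default, and the update is applied only when
 * it is actually below t_maxseg + 40.
 */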
#ifdef INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct in6_addr *dst;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct icmp6_hdr *icmp6;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	struct in_conninfo inc;
	struct tcp_ports {
		uint16_t th_sport;
		uint16_t th_dport;
	} t_ports;
	tcp_seq icmp_tcp_seq;
	unsigned int mtu;
	unsigned int off;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	/* If the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		icmp6 = ip6cp->ip6c_icmp6;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
		dst = ip6cp->ip6c_finaldst;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
		dst = NULL;
	}

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc_notify;
	else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
	    cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL)
		notify = tcp_drop_syn_sent;

	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip6 = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)
		return;

	if (ip6 == NULL) {
		in6_pcbnotify(&V_tcbinfo, sa, 0,
		    (const struct sockaddr *)sa6_src,
		    0, cmd, NULL, notify);
		return;
	}

	/* Check if we can safely get the ports from the tcp hdr. */
	if (m == NULL ||
	    (m->m_pkthdr.len <
	    (int32_t) (off + sizeof(struct tcp_ports)))) {
		return;
	}
	bzero(&t_ports, sizeof(struct tcp_ports));
	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
	INP_INFO_RLOCK(&V_tcbinfo);
	inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
	    &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL);
	if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
		/* Signal EHOSTDOWN, as it flushes the cached route. */
		inp = (*notify)(inp, EHOSTDOWN);
		goto out;
	}
	off += sizeof(struct tcp_ports);
	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
		goto out;
	}
	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
	if (inp != NULL) {
		if (!(inp->inp_flags & INP_TIMEWAIT) &&
		    !(inp->inp_flags & INP_DROPPED) &&
		    !(inp->inp_socket == NULL)) {
			tp = intotcpcb(inp);
			if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
			    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
				if (cmd == PRC_MSGSIZE) {
					/*
					 * MTU discovery:
					 * If we got a needfrag set the MTU
					 * in the route to the suggested new
					 * value (if given) and then notify.
					 */
					mtu = ntohl(icmp6->icmp6_mtu);
					/*
					 * If no alternative MTU was
					 * proposed, or the proposed
					 * MTU was too small, set to
					 * the min.
					 */
					if (mtu < IPV6_MMTU)
						mtu = IPV6_MMTU - 8;
					bzero(&inc, sizeof(inc));
					inc.inc_fibnum = M_GETFIB(m);
					inc.inc_flags |= INC_ISIPV6;
					inc.inc6_faddr = *dst;
					if (in6_setscope(&inc.inc6_faddr,
					    m->m_pkthdr.rcvif, NULL))
						goto out;
					/*
					 * Only process the offered MTU if it
					 * is smaller than the current one.
					 */
					if (mtu < tp->t_maxseg +
					    sizeof (struct tcphdr) +
					    sizeof (struct ip6_hdr)) {
						tcp_hc_updatemtu(&inc, mtu);
						tcp_mtudisc(inp, mtu);
						ICMP6STAT_INC(icp6s_pmtuchg);
					}
				} else
					inp = (*notify)(inp,
					    inet6ctlerrmap[cmd]);
			}
		}
	} else {
		bzero(&inc, sizeof(inc));
		inc.inc_fibnum = M_GETFIB(m);
		inc.inc_flags |= INC_ISIPV6;
		inc.inc_fport = t_ports.th_dport;
		inc.inc_lport = t_ports.th_sport;
		inc.inc6_faddr = *dst;
		inc.inc6_laddr = ip6->ip6_src;
		syncache_unreach(&inc, icmp_tcp_seq);
	}
out:
	if (inp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
}
#endif /* INET6 */
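/*
 * Note on the IPv6 clamp above: IPV6_MMTU is the 1280-byte minimum
 * link MTU, and the 8 bytes subtracted presumably leave room for a
 * fragment header, since in IPv6 only the sender may fragment.
 */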
/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1. In SYN-ACK packets.
 * 2. In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used, with only small modifications.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * As reading the *exact* system time is too expensive to be done
 * whenever setting up a TCP connection, we increment the time
 * offset in two ways.  First, a small random positive increment
 * is added to isn_offset for each connection that is set up.
 * Second, the function tcp_isn_tick fires once per clock tick
 * and increments isn_offset as necessary so that sequence numbers
 * are incremented at approximately ISN_BYTES_PER_SECOND.  The
 * random positive increments serve only to ensure that the same
 * exact sequence number is never sent out twice (as could otherwise
 * happen when a port is recycled in less than the system tick
 * interval.)
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * Locking of the global variables isn_secret, isn_last_reseed,
 * isn_offset, isn_offset_old, and isn_ctx is performed using the TCP
 * pcbinfo lock.  In general, this means holding an exclusive (write)
 * lock.
 */

#define ISN_BYTES_PER_SECOND 1048576
#define ISN_STATIC_INCREMENT 4096
#define ISN_RANDOM_INCREMENT (4096 - 1)
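/*
 * Rollover arithmetic behind the comment above: the sequence space is
 * 2^32 bytes and advances at ISN_BYTES_PER_SECOND (2^20), so a full
 * wrap takes 2^32 / 2^20 = 4096 seconds, slightly over 68 minutes.
 */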
static VNET_DEFINE(u_char, isn_secret[32]);
static VNET_DEFINE(int, isn_last);
static VNET_DEFINE(int, isn_last_reseed);
static VNET_DEFINE(u_int32_t, isn_offset);
static VNET_DEFINE(u_int32_t, isn_offset_old);

#define	V_isn_secret		VNET(isn_secret)
#define	V_isn_last		VNET(isn_last)
#define	V_isn_last_reseed	VNET(isn_last_reseed)
#define	V_isn_offset		VNET(isn_offset)
#define	V_isn_offset_old	VNET(isn_offset_old)

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	MD5_CTX isn_ctx;
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;
	u_int32_t projected_offset;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	ISN_LOCK();
	/* Seed if this is the first use, reseed if requested. */
	if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
	    (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
	    < (u_int)ticks))) {
		read_random(&V_isn_secret, sizeof(V_isn_secret));
		V_isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport,
	    sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport,
	    sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *)&V_isn_secret, sizeof(V_isn_secret));
	MD5Final((u_char *)&md5_buffer, &isn_ctx);
	new_isn = (tcp_seq)md5_buffer[0];
	V_isn_offset += ISN_STATIC_INCREMENT +
	    (arc4random() & ISN_RANDOM_INCREMENT);
	if (ticks != V_isn_last) {
		projected_offset = V_isn_offset_old +
		    ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
		if (SEQ_GT(projected_offset, V_isn_offset))
			V_isn_offset = projected_offset;
		V_isn_offset_old = V_isn_offset;
		V_isn_last = ticks;
	}
	new_isn += V_isn_offset;
	ISN_UNLOCK();
	return (new_isn);
}
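/*
 * The hash input above is the classic RFC 1948 construction: local and
 * foreign ports and addresses plus a periodically reseeded secret, so
 * only connections sharing the full 4-tuple ever draw from the same
 * ISN sequence.  (The scheme was later formalized by RFC 6528, which
 * obsoletes RFC 1948.)
 */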
/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(struct inpcb *inp, int errno)
{
	struct tcpcb *tp;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if ((inp->inp_flags & INP_TIMEWAIT) ||
	    (inp->inp_flags & INP_DROPPED))
		return (inp);

	tp = intotcpcb(inp);
	if (tp->t_state != TCPS_SYN_SENT)
		return (inp);

	if (IS_FASTOPEN(tp->t_flags))
		tcp_fastopen_disable_path(tp);

	tp = tcp_drop(tp, errno);
	if (tp != NULL)
		return (inp);
	else
		return (NULL);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the
 * MSS based on the new value.  Also nudge TCP to send something, since
 * we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
static struct inpcb *
tcp_mtudisc_notify(struct inpcb *inp, int error)
{

	tcp_mtudisc(inp, -1);
	return (inp);
}

static void
tcp_mtudisc(struct inpcb *inp, int mtuoffer)
{
	struct tcpcb *tp;
	struct socket *so;

	INP_WLOCK_ASSERT(inp);
	if ((inp->inp_flags & INP_TIMEWAIT) ||
	    (inp->inp_flags & INP_DROPPED))
		return;

	tp = intotcpcb(inp);
	KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));

	tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);

	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	/* If the mss is larger than the socket buffer, decrease the mss. */
	if (so->so_snd.sb_hiwat < tp->t_maxseg)
		tp->t_maxseg = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_snd);

	TCPSTAT_INC(tcps_mturesent);
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_free_sackholes(tp);
	tp->snd_recover = tp->snd_max;
	if (tp->t_flags & TF_SACK_PERMIT)
		EXIT_FASTRECOVERY(tp->t_flags);
	tp->t_fb->tfb_tcp_output(tp);
}
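/*
 * Net effect of tcp_mtudisc(), sketched for IPv4 with no options: after
 * a needfrag advertising a 1400-byte path MTU, tcp_mss_update() leaves
 * t_maxseg at roughly 1400 - 40 = 1360 bytes, snd_nxt is rewound to
 * snd_una, and the tfb_tcp_output() call retransmits at the new size.
 */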
#ifdef INET
/*
 * Look up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return 0.  This routine
 * is called by TCP routines that access the rmx structure and by
 * tcp_mss_update to get the peer/interface MTU.
 */
uint32_t
tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
{
	struct nhop4_extended nh4;
	struct ifnet *ifp;
	uint32_t maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

	if (inc->inc_faddr.s_addr != INADDR_ANY) {

		if (fib4_lookup_nh_ext(inc->inc_fibnum, inc->inc_faddr,
		    NHR_REF, 0, &nh4) != 0)
			return (0);

		ifp = nh4.nh_ifp;
		maxmtu = nh4.nh_mtu;

		/* Report additional interface capabilities. */
		if (cap != NULL) {
			if (ifp->if_capenable & IFCAP_TSO4 &&
			    ifp->if_hwassist & CSUM_TSO) {
				cap->ifcap |= CSUM_TSO;
				cap->tsomax = ifp->if_hw_tsomax;
				cap->tsomaxsegcount =
				    ifp->if_hw_tsomaxsegcount;
				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
			}
		}
		fib4_free_nh_ext(inc->inc_fibnum, &nh4);
	}
	return (maxmtu);
}
#endif /* INET */

#ifdef INET6
uint32_t
tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
{
	struct nhop6_extended nh6;
	struct in6_addr dst6;
	uint32_t scopeid;
	struct ifnet *ifp;
	uint32_t maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
		in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
		if (fib6_lookup_nh_ext(inc->inc_fibnum, &dst6, scopeid, 0,
		    0, &nh6) != 0)
			return (0);

		ifp = nh6.nh_ifp;
		maxmtu = nh6.nh_mtu;

		/* Report additional interface capabilities. */
		if (cap != NULL) {
			if (ifp->if_capenable & IFCAP_TSO6 &&
			    ifp->if_hwassist & CSUM_TSO) {
				cap->ifcap |= CSUM_TSO;
				cap->tsomax = ifp->if_hw_tsomax;
				cap->tsomaxsegcount =
				    ifp->if_hw_tsomaxsegcount;
				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
			}
		}
		fib6_free_nh_ext(inc->inc_fibnum, &nh6);
	}

	return (maxmtu);
}
#endif /* INET6 */
/*
 * Calculate the effective SMSS per the RFC 5681 definition for a given
 * TCP connection at its current state, taking SACK and other TCP
 * options into account.
 */
u_int
tcp_maxseg(const struct tcpcb *tp)
{
	u_int optlen;

	if (tp->t_flags & TF_NOOPT)
		return (tp->t_maxseg);

	/*
	 * This is simplified code from tcp_addoptions(), without the
	 * proper loop and with most of the padding hardcoded.  We might
	 * miscompute the padding in some edge cases, but that is
	 * harmless, since the result of tcp_maxseg() is used only in
	 * cwnd and ssthresh estimations.
	 */
#define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		if (tp->t_flags & TF_RCVD_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = 0;
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
		if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
			optlen += TCPOLEN_SACKHDR;
			optlen += tp->rcv_numsacks * TCPOLEN_SACK;
			optlen = PAD(optlen);
		}
	} else {
		if (tp->t_flags & TF_REQ_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = PAD(TCPOLEN_MAXSEG);
		if (tp->t_flags & TF_REQ_SCALE)
			optlen += PAD(TCPOLEN_WINDOW);
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
		if (tp->t_flags & TF_SACK_PERMIT)
			optlen += PAD(TCPOLEN_SACK_PERMITTED);
	}
#undef PAD
	optlen = min(optlen, TCP_MAXOLEN);
	return (tp->t_maxseg - optlen);
}
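/*
 * Worked example of the option accounting above, for an established
 * connection with a 1460-byte t_maxseg: timestamps alone cost
 * TCPOLEN_TSTAMP_APPA (12) bytes, yielding an effective SMSS of 1448;
 * one SACK block adds 2 + 8 = 10 more, padded to 24 in total, for 1436.
 */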
static int
sysctl_drop(SYSCTL_HANDLER_ARGS)
{
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];
	struct inpcb *inp;
	struct tcpcb *tp;
	struct tcptw *tw;
	struct sockaddr_in *fin, *lin;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
#endif
	int error;

	inp = NULL;
	fin = lin = NULL;
#ifdef INET6
	fin6 = lin6 = NULL;
#endif
	error = 0;

	if (req->oldptr != NULL || req->oldlen != 0)
		return (EINVAL);
	if (req->newptr == NULL)
		return (EPERM);
	if (req->newlen < sizeof(addrs))
		return (ENOMEM);
	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
	if (error)
		return (error);

	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
				return (EINVAL);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
			fin = (struct sockaddr_in *)&addrs[0];
			lin = (struct sockaddr_in *)&addrs[1];
			break;
		}
		error = sa6_embedscope(fin6, V_ip6_use_defzone);
		if (error)
			return (error);
		error = sa6_embedscope(lin6, V_ip6_use_defzone);
		if (error)
			return (error);
		break;
#endif
#ifdef INET
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);
		break;
#endif
	default:
		return (EINVAL);
	}
	INP_INFO_RLOCK(&V_tcbinfo);
	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
		    fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
		    INPLOOKUP_WLOCKPCB, NULL);
		break;
#endif
#ifdef INET
	case AF_INET:
		inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
		    lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
		break;
#endif
	}
	if (inp != NULL) {
		if (inp->inp_flags & INP_TIMEWAIT) {
			/*
			 * XXXRW: There currently exists a state where an
			 * inpcb is present, but its timewait state has been
			 * discarded.  For now, don't allow dropping of this
			 * type of inpcb.
			 */
			tw = intotw(inp);
			if (tw != NULL)
				tcp_twclose(tw, 0);
			else
				INP_WUNLOCK(inp);
		} else if (!(inp->inp_flags & INP_DROPPED) &&
		    !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
			tp = intotcpcb(inp);
			tp = tcp_drop(tp, ECONNABORTED);
			if (tp != NULL)
				INP_WUNLOCK(inp);
		} else
			INP_WUNLOCK(inp);
	} else
		error = ESRCH;
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP, NULL,
    0, sysctl_drop, "", "Drop TCP connection");
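/*
 * This handler backs tcpdrop(8): userland writes a pair of sockaddrs
 * (foreign, then local) to net.inet.tcp.drop and the matching
 * connection, or its timewait state, is torn down with ECONNABORTED.
 */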
/*
 * Generate a standardized TCP log line for use throughout the
 * tcp subsystem.  Memory allocation is done with M_NOWAIT to
 * allow use in the interrupt context.
 *
 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
 * NB: The function may return NULL if memory allocation failed.
 *
 * Due to header inclusion and ordering limitations the struct ip
 * and ip6_hdr pointers have to be passed as void pointers.
 */
char *
tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
    const void *ip6hdr)
{

	/* Is logging enabled? */
	if (tcp_log_in_vain == 0)
		return (NULL);

	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
}

char *
tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
    const void *ip6hdr)
{

	/* Is logging enabled? */
	if (tcp_log_debug == 0)
		return (NULL);

	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
}

static char *
tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
    const void *ip6hdr)
{
	char *s, *sp;
	size_t size;
	struct ip *ip;
#ifdef INET6
	const struct ip6_hdr *ip6;

	ip6 = (const struct ip6_hdr *)ip6hdr;
#endif /* INET6 */
	ip = (struct ip *)ip4hdr;

	/*
	 * The log line looks like this:
	 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
	 */
	size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
	    sizeof(PRINT_TH_FLAGS) + 1 +
#ifdef INET6
	    2 * INET6_ADDRSTRLEN;
#else
	    2 * INET_ADDRSTRLEN;
#endif /* INET6 */

	s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
	if (s == NULL)
		return (NULL);

	strcat(s, "TCP: [");
	sp = s + strlen(s);

	if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
		inet_ntoa_r(inc->inc_faddr, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
		sp = s + strlen(s);
		inet_ntoa_r(inc->inc_laddr, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
#ifdef INET6
	} else if (inc) {
		ip6_sprintf(sp, &inc->inc6_faddr);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
		sp = s + strlen(s);
		ip6_sprintf(sp, &inc->inc6_laddr);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
	} else if (ip6 && th) {
		ip6_sprintf(sp, &ip6->ip6_src);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
		sp = s + strlen(s);
		ip6_sprintf(sp, &ip6->ip6_dst);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(th->th_dport));
#endif /* INET6 */
#ifdef INET
	} else if (ip && th) {
		inet_ntoa_r(ip->ip_src, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
		sp = s + strlen(s);
		inet_ntoa_r(ip->ip_dst, sp);
		sp = s + strlen(s);
		sprintf(sp, "]:%i", ntohs(th->th_dport));
#endif /* INET */
	} else {
		free(s, M_TCPLOG);
		return (NULL);
	}
	sp = s + strlen(s);
	if (th)
		sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
	if (*(s + size - 1) != '\0')
		panic("%s: string too long", __func__);
	return (s);
}

/*
 * A subroutine which makes it easy to track TCP state changes with DTrace.
 * This function shouldn't be called for t_state initializations that don't
 * correspond to actual TCP state transitions.
 */
void
tcp_state_change(struct tcpcb *tp, int newstate)
{
#if defined(KDTRACE_HOOKS)
	int pstate = tp->t_state;
#endif

	TCPSTATES_DEC(tp->t_state);
	TCPSTATES_INC(newstate);
	tp->t_state = newstate;
	TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
}

/*
 * Create an external-format (``xtcpcb'') structure using the information in
 * the kernel-format tcpcb structure pointed to by tp.  This is done to
 * reduce the spew of irrelevant information over this interface, to isolate
 * user code from changes in the kernel structure, and potentially to provide
 * information-hiding if we decide that some of this information should be
 * hidden from users.
 */
void
tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
{
	struct tcpcb *tp = intotcpcb(inp);
	sbintime_t now;

	if (inp->inp_flags & INP_TIMEWAIT) {
		bzero(xt, sizeof(struct xtcpcb));
		xt->t_state = TCPS_TIME_WAIT;
	} else {
		xt->t_state = tp->t_state;
		xt->t_logstate = tp->t_logstate;
		xt->t_flags = tp->t_flags;
		xt->t_sndzerowin = tp->t_sndzerowin;
		xt->t_sndrexmitpack = tp->t_sndrexmitpack;
		xt->t_rcvoopack = tp->t_rcvoopack;

		now = getsbinuptime();
#define	COPYTIMER(ttt)	do {						\
		if (callout_active(&tp->t_timers->ttt))			\
			xt->ttt = (tp->t_timers->ttt.c_time - now) /	\
			    SBT_1MS;					\
		else							\
			xt->ttt = 0;					\
} while (0)
		COPYTIMER(tt_delack);
		COPYTIMER(tt_rexmt);
		COPYTIMER(tt_persist);
		COPYTIMER(tt_keep);
		COPYTIMER(tt_2msl);
#undef COPYTIMER
		xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;

		bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
		    TCP_FUNCTION_NAME_LEN_MAX);
		bzero(xt->xt_logid, TCP_LOG_ID_LEN);
#ifdef TCP_BLACKBOX
		(void)tcp_log_get_id(tp, xt->xt_logid);
#endif
	}

	xt->xt_len = sizeof(struct xtcpcb);
	in_pcbtoxinpcb(inp, &xt->xt_inp);
	if (inp->inp_socket == NULL)
		xt->xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
}
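/*
 * Units note for the export above: COPYTIMER() reports the time left on
 * each active callout in milliseconds (c_time is an sbintime_t, hence
 * the SBT_1MS divisor), and t_rcvtime is likewise converted from ticks
 * to milliseconds before reaching userland.
 */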