xref: /freebsd/sys/netinet/tcp_subr.c (revision 705a6ee2b6112c3a653b2bd68f961a8b5b8071a4)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
40 #include "opt_kern_tls.h"
41 #include "opt_tcpdebug.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/arb.h>
46 #include <sys/callout.h>
47 #include <sys/eventhandler.h>
48 #ifdef TCP_HHOOK
49 #include <sys/hhook.h>
50 #endif
51 #include <sys/kernel.h>
52 #ifdef TCP_HHOOK
53 #include <sys/khelp.h>
54 #endif
55 #ifdef KERN_TLS
56 #include <sys/ktls.h>
57 #endif
58 #include <sys/qmath.h>
59 #include <sys/stats.h>
60 #include <sys/sysctl.h>
61 #include <sys/jail.h>
62 #include <sys/malloc.h>
63 #include <sys/refcount.h>
64 #include <sys/mbuf.h>
65 #ifdef INET6
66 #include <sys/domain.h>
67 #endif
68 #include <sys/priv.h>
69 #include <sys/proc.h>
70 #include <sys/sdt.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/protosw.h>
74 #include <sys/random.h>
75 
76 #include <vm/uma.h>
77 
78 #include <net/route.h>
79 #include <net/route/nhop.h>
80 #include <net/if.h>
81 #include <net/if_var.h>
82 #include <net/vnet.h>
83 
84 #include <netinet/in.h>
85 #include <netinet/in_fib.h>
86 #include <netinet/in_kdtrace.h>
87 #include <netinet/in_pcb.h>
88 #include <netinet/in_systm.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip.h>
91 #include <netinet/ip_icmp.h>
92 #include <netinet/ip_var.h>
93 #ifdef INET6
94 #include <netinet/icmp6.h>
95 #include <netinet/ip6.h>
96 #include <netinet6/in6_fib.h>
97 #include <netinet6/in6_pcb.h>
98 #include <netinet6/ip6_var.h>
99 #include <netinet6/scope6_var.h>
100 #include <netinet6/nd6.h>
101 #endif
102 
103 #include <netinet/tcp.h>
104 #include <netinet/tcp_fsm.h>
105 #include <netinet/tcp_seq.h>
106 #include <netinet/tcp_timer.h>
107 #include <netinet/tcp_var.h>
108 #include <netinet/tcp_log_buf.h>
109 #include <netinet/tcp_syncache.h>
110 #include <netinet/tcp_hpts.h>
111 #include <netinet/cc/cc.h>
112 #ifdef INET6
113 #include <netinet6/tcp6_var.h>
114 #endif
115 #include <netinet/tcpip.h>
116 #include <netinet/tcp_fastopen.h>
117 #ifdef TCPPCAP
118 #include <netinet/tcp_pcap.h>
119 #endif
120 #ifdef TCPDEBUG
121 #include <netinet/tcp_debug.h>
122 #endif
123 #ifdef INET6
124 #include <netinet6/ip6protosw.h>
125 #endif
126 #ifdef TCP_OFFLOAD
127 #include <netinet/tcp_offload.h>
128 #endif
129 #include <netinet/udp.h>
130 #include <netinet/udp_var.h>
131 
132 #include <netipsec/ipsec_support.h>
133 
134 #include <machine/in_cksum.h>
135 #include <crypto/siphash/siphash.h>
136 
137 #include <security/mac/mac_framework.h>
138 
139 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
140 #ifdef INET6
141 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
142 #endif
143 
144 #ifdef NETFLIX_EXP_DETECTION
145 /*  Sack attack detection thresholds and such */
146 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack,
147     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
148     "Sack Attack detection thresholds");
149 int32_t tcp_force_detection = 0;
150 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection,
151     CTLFLAG_RW,
152     &tcp_force_detection, 0,
153     "Do we force detection even if the INP has it off?");
154 int32_t tcp_sack_to_ack_thresh = 700;	/* 70 % */
155 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh,
156     CTLFLAG_RW,
157     &tcp_sack_to_ack_thresh, 700,
158     "Percentage of sacks to acks we must see above (10.1 percent is 101)?");
159 int32_t tcp_sack_to_move_thresh = 600;	/* 60 % */
160 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh,
161     CTLFLAG_RW,
162     &tcp_sack_to_move_thresh, 600,
163     "Percentage of sack moves we must see above (10.1 percent is 101)");
164 int32_t tcp_restoral_thresh = 650;	/* 65 % (sack:2:ack -5%) */
165 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh,
166     CTLFLAG_RW,
167     &tcp_restoral_thresh, 550,
168     "Percentage of sack to ack percentage we must see below to restore(10.1 percent is 101)");
169 int32_t tcp_sad_decay_val = 800;
170 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per,
171     CTLFLAG_RW,
172     &tcp_sad_decay_val, 800,
173     "The decay percentage (10.1 percent equals 101 )");
174 int32_t tcp_map_minimum = 500;
175 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps,
176     CTLFLAG_RW,
177     &tcp_map_minimum, 500,
178     "Number of Map enteries before we start detection");
179 int32_t tcp_attack_on_turns_on_logging = 0;
180 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, attacks_logged,
181     CTLFLAG_RW,
182     &tcp_attack_on_turns_on_logging, 0,
183    "When we have a positive hit on attack, do we turn on logging?");
184 int32_t tcp_sad_pacing_interval = 2000;
185 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int,
186     CTLFLAG_RW,
187     &tcp_sad_pacing_interval, 2000,
188     "What is the minimum pacing interval for a classified attacker?");
189 
190 int32_t tcp_sad_low_pps = 100;
191 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps,
192     CTLFLAG_RW,
193     &tcp_sad_low_pps, 100,
194     "What is the input pps that below which we do not decay?");
195 #endif
196 uint32_t tcp_ack_war_time_window = 1000;
197 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_timewindow,
198     CTLFLAG_RW,
199     &tcp_ack_war_time_window, 1000,
200    "If the tcp_stack does ack-war prevention how many milliseconds are in its time window?");
201 uint32_t tcp_ack_war_cnt = 5;
202 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_cnt,
203     CTLFLAG_RW,
204     &tcp_ack_war_cnt, 5,
205    "If the tcp_stack does ack-war prevention how many acks can be sent in its time window?");
206 
207 struct rwlock tcp_function_lock;
208 
209 static int
210 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
211 {
212 	int error, new;
213 
214 	new = V_tcp_mssdflt;
215 	error = sysctl_handle_int(oidp, &new, 0, req);
216 	if (error == 0 && req->newptr) {
217 		if (new < TCP_MINMSS)
218 			error = EINVAL;
219 		else
220 			V_tcp_mssdflt = new;
221 	}
222 	return (error);
223 }
224 
225 SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
226     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
227     &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I",
228     "Default TCP Maximum Segment Size");
229 
230 #ifdef INET6
231 static int
232 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
233 {
234 	int error, new;
235 
236 	new = V_tcp_v6mssdflt;
237 	error = sysctl_handle_int(oidp, &new, 0, req);
238 	if (error == 0 && req->newptr) {
239 		if (new < TCP_MINMSS)
240 			error = EINVAL;
241 		else
242 			V_tcp_v6mssdflt = new;
243 	}
244 	return (error);
245 }
246 
247 SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
248     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
249     &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
250    "Default TCP Maximum Segment Size for IPv6");
251 #endif /* INET6 */
252 
253 /*
254  * Minimum MSS we accept and use. This prevents DoS attacks where
255  * we are forced to a ridiculously low MSS like 20 and send hundreds
256  * of packets instead of one. The effect scales with the available
257  * bandwidth and quickly saturates the CPU and network interface
258  * with packet generation and sending. Set to zero to disable MINMSS
259  * checking. This setting prevents us from sending too small packets.
260  */
261 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
262 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
263      &VNET_NAME(tcp_minmss), 0,
264     "Minimum TCP Maximum Segment Size");
265 
266 VNET_DEFINE(int, tcp_do_rfc1323) = 1;
267 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
268     &VNET_NAME(tcp_do_rfc1323), 0,
269     "Enable rfc1323 (high performance TCP) extensions");
270 
271 /*
272  * As of June 2021, several TCP stacks violate RFC 7323 from September 2014.
273  * Some stacks negotiate TS, but never send them after connection setup. Some
274  * stacks negotiate TS, but don't send them when sending keep-alive segments.
275  * These include modern widely deployed TCP stacks.
276  * Therefore tolerating violations for now...
277  */
278 VNET_DEFINE(int, tcp_tolerate_missing_ts) = 1;
279 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tolerate_missing_ts, CTLFLAG_VNET | CTLFLAG_RW,
280     &VNET_NAME(tcp_tolerate_missing_ts), 0,
281     "Tolerate missing TCP timestamps");
282 
283 VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1;
284 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW,
285     &VNET_NAME(tcp_ts_offset_per_conn), 0,
286     "Initialize TCP timestamps per connection instead of per host pair");
287 
288 /* How many connections are pacing */
289 static volatile uint32_t number_of_tcp_connections_pacing = 0;
290 static uint32_t shadow_num_connections = 0;
291 
292 static int tcp_pacing_limit = 10000;
293 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pacing_limit, CTLFLAG_RW,
294     &tcp_pacing_limit, 1000,
295     "If the TCP stack does pacing, is there a limit (-1 = no, 0 = no pacing N = number of connections)");
296 
297 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pacing_count, CTLFLAG_RD,
298     &shadow_num_connections, 0, "Number of TCP connections being paced");
299 
300 static int	tcp_log_debug = 0;
301 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
302     &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
303 
304 static int	tcp_tcbhashsize;
305 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
306     &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
307 
308 static int	do_tcpdrain = 1;
309 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
310     "Enable tcp_drain routine for extra help when low on mbufs");
311 
312 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
313     &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
314 
315 VNET_DEFINE_STATIC(int, icmp_may_rst) = 1;
316 #define	V_icmp_may_rst			VNET(icmp_may_rst)
317 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
318     &VNET_NAME(icmp_may_rst), 0,
319     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
320 
321 VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0;
322 #define	V_tcp_isn_reseed_interval	VNET(tcp_isn_reseed_interval)
323 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
324     &VNET_NAME(tcp_isn_reseed_interval), 0,
325     "Seconds between reseeding of ISN secret");
326 
327 static int	tcp_soreceive_stream;
328 SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
329     &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");
330 
331 VNET_DEFINE(uma_zone_t, sack_hole_zone);
332 #define	V_sack_hole_zone		VNET(sack_hole_zone)
333 VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0;	/* unlimited */
334 static int
335 sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS)
336 {
337 	int error;
338 	uint32_t new;
339 
340 	new = V_tcp_map_entries_limit;
341 	error = sysctl_handle_int(oidp, &new, 0, req);
342 	if (error == 0 && req->newptr) {
343 		/* only allow "0" and value > minimum */
344 		if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT)
345 			error = EINVAL;
346 		else
347 			V_tcp_map_entries_limit = new;
348 	}
349 	return (error);
350 }
351 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit,
352     CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
353     &VNET_NAME(tcp_map_entries_limit), 0,
354     &sysctl_net_inet_tcp_map_limit_check, "IU",
355     "Total sendmap entries limit");
356 
357 VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0;	/* unlimited */
358 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW,
359      &VNET_NAME(tcp_map_split_limit), 0,
360     "Total sendmap split entries limit");
361 
362 #ifdef TCP_HHOOK
363 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
364 #endif
365 
366 #define TS_OFFSET_SECRET_LENGTH SIPHASH_KEY_LENGTH
367 VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]);
368 #define	V_ts_offset_secret	VNET(ts_offset_secret)
369 
370 static int	tcp_default_fb_init(struct tcpcb *tp);
371 static void	tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged);
372 static int	tcp_default_handoff_ok(struct tcpcb *tp);
373 static struct inpcb *tcp_notify(struct inpcb *, int);
374 static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
375 static void tcp_mtudisc(struct inpcb *, int);
376 static char *	tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
377 		    void *ip4hdr, const void *ip6hdr);
378 
379 static struct tcp_function_block tcp_def_funcblk = {
380 	.tfb_tcp_block_name = "freebsd",
381 	.tfb_tcp_output = tcp_output,
382 	.tfb_tcp_do_segment = tcp_do_segment,
383 	.tfb_tcp_ctloutput = tcp_default_ctloutput,
384 	.tfb_tcp_handoff_ok = tcp_default_handoff_ok,
385 	.tfb_tcp_fb_init = tcp_default_fb_init,
386 	.tfb_tcp_fb_fini = tcp_default_fb_fini,
387 };
388 
389 static int tcp_fb_cnt = 0;
390 struct tcp_funchead t_functions;
391 static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk;
392 
393 void
394 tcp_record_dsack(struct tcpcb *tp, tcp_seq start, tcp_seq end, int tlp)
395 {
396 	TCPSTAT_INC(tcps_dsack_count);
397 	tp->t_dsack_pack++;
398 	if (tlp == 0) {
399 		if (SEQ_GT(end, start)) {
400 			tp->t_dsack_bytes += (end - start);
401 			TCPSTAT_ADD(tcps_dsack_bytes, (end - start));
402 		} else {
403 			tp->t_dsack_tlp_bytes += (start - end);
404 			TCPSTAT_ADD(tcps_dsack_bytes, (start - end));
405 		}
406 	} else {
407 		if (SEQ_GT(end, start)) {
408 			tp->t_dsack_bytes += (end - start);
409 			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (end - start));
410 		} else {
411 			tp->t_dsack_tlp_bytes += (start - end);
412 			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (start - end));
413 		}
414 	}
415 }
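
/*
 * For example, a non-TLP D-SACK block covering [1000, 1500) arrives with
 * start = 1000 and end = 1500; SEQ_GT(end, start) holds, so 500 bytes are
 * added to t_dsack_bytes and tcps_dsack_bytes, and t_dsack_pack is bumped
 * by one.
 */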
416 
417 static struct tcp_function_block *
418 find_tcp_functions_locked(struct tcp_function_set *fs)
419 {
420 	struct tcp_function *f;
421 	struct tcp_function_block *blk=NULL;
422 
423 	TAILQ_FOREACH(f, &t_functions, tf_next) {
424 		if (strcmp(f->tf_name, fs->function_set_name) == 0) {
425 			blk = f->tf_fb;
426 			break;
427 		}
428 	}
429 	return(blk);
430 }
431 
432 static struct tcp_function_block *
433 find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
434 {
435 	struct tcp_function_block *rblk=NULL;
436 	struct tcp_function *f;
437 
438 	TAILQ_FOREACH(f, &t_functions, tf_next) {
439 		if (f->tf_fb == blk) {
440 			rblk = blk;
441 			if (s) {
442 				*s = f;
443 			}
444 			break;
445 		}
446 	}
447 	return (rblk);
448 }
449 
450 struct tcp_function_block *
451 find_and_ref_tcp_functions(struct tcp_function_set *fs)
452 {
453 	struct tcp_function_block *blk;
454 
455 	rw_rlock(&tcp_function_lock);
456 	blk = find_tcp_functions_locked(fs);
457 	if (blk)
458 		refcount_acquire(&blk->tfb_refcnt);
459 	rw_runlock(&tcp_function_lock);
460 	return(blk);
461 }
462 
463 struct tcp_function_block *
464 find_and_ref_tcp_fb(struct tcp_function_block *blk)
465 {
466 	struct tcp_function_block *rblk;
467 
468 	rw_rlock(&tcp_function_lock);
469 	rblk = find_tcp_fb_locked(blk, NULL);
470 	if (rblk)
471 		refcount_acquire(&rblk->tfb_refcnt);
472 	rw_runlock(&tcp_function_lock);
473 	return(rblk);
474 }
475 
476 /* Find a matching alias for the given tcp_function_block. */
477 int
478 find_tcp_function_alias(struct tcp_function_block *blk,
479     struct tcp_function_set *fs)
480 {
481 	struct tcp_function *f;
482 	int found;
483 
484 	found = 0;
485 	rw_rlock(&tcp_function_lock);
486 	TAILQ_FOREACH(f, &t_functions, tf_next) {
487 		if ((f->tf_fb == blk) &&
488 		    (strncmp(f->tf_name, blk->tfb_tcp_block_name,
489 		        TCP_FUNCTION_NAME_LEN_MAX) != 0)) {
490 			/* Matching function block with different name. */
491 			strncpy(fs->function_set_name, f->tf_name,
492 			    TCP_FUNCTION_NAME_LEN_MAX);
493 			found = 1;
494 			break;
495 		}
496 	}
497 	/* Null terminate the string appropriately. */
498 	if (found) {
499 		fs->function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
500 	} else {
501 		fs->function_set_name[0] = '\0';
502 	}
503 	rw_runlock(&tcp_function_lock);
504 	return (found);
505 }
506 
507 static struct tcp_function_block *
508 find_and_ref_tcp_default_fb(void)
509 {
510 	struct tcp_function_block *rblk;
511 
512 	rw_rlock(&tcp_function_lock);
513 	rblk = tcp_func_set_ptr;
514 	refcount_acquire(&rblk->tfb_refcnt);
515 	rw_runlock(&tcp_function_lock);
516 	return (rblk);
517 }
518 
519 void
520 tcp_switch_back_to_default(struct tcpcb *tp)
521 {
522 	struct tcp_function_block *tfb;
523 
524 	KASSERT(tp->t_fb != &tcp_def_funcblk,
525 	    ("%s: called by the built-in default stack", __func__));
526 
527 	/*
528 	 * Release the old stack. This function will either find a new one
529 	 * or panic.
530 	 */
531 	if (tp->t_fb->tfb_tcp_fb_fini != NULL)
532 		(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
533 	refcount_release(&tp->t_fb->tfb_refcnt);
534 
535 	/*
536 	 * Now, we'll find a new function block to use.
537 	 * Start by trying the current user-selected
538 	 * default, unless this stack is the user-selected
539 	 * default.
540 	 */
541 	tfb = find_and_ref_tcp_default_fb();
542 	if (tfb == tp->t_fb) {
543 		refcount_release(&tfb->tfb_refcnt);
544 		tfb = NULL;
545 	}
546 	/* Does the stack accept this connection? */
547 	if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL &&
548 	    (*tfb->tfb_tcp_handoff_ok)(tp)) {
549 		refcount_release(&tfb->tfb_refcnt);
550 		tfb = NULL;
551 	}
552 	/* Try to use that stack. */
553 	if (tfb != NULL) {
554 		/* Initialize the new stack. If it succeeds, we are done. */
555 		tp->t_fb = tfb;
556 		if (tp->t_fb->tfb_tcp_fb_init == NULL ||
557 		    (*tp->t_fb->tfb_tcp_fb_init)(tp) == 0)
558 			return;
559 
560 		/*
561 		 * Initialization failed. Release the reference count on
562 		 * the stack.
563 		 */
564 		refcount_release(&tfb->tfb_refcnt);
565 	}
566 
567 	/*
568 	 * If that wasn't feasible, use the built-in default
569 	 * stack which is not allowed to reject anyone.
570 	 */
571 	tfb = find_and_ref_tcp_fb(&tcp_def_funcblk);
572 	if (tfb == NULL) {
573 		/* there should always be a default */
574 		panic("Can't refer to tcp_def_funcblk");
575 	}
576 	if (tfb->tfb_tcp_handoff_ok != NULL) {
577 		if ((*tfb->tfb_tcp_handoff_ok) (tp)) {
578 			/* The default stack cannot say no */
579 			panic("Default stack rejects a new session?");
580 		}
581 	}
582 	tp->t_fb = tfb;
583 	if (tp->t_fb->tfb_tcp_fb_init != NULL &&
584 	    (*tp->t_fb->tfb_tcp_fb_init)(tp)) {
585 		/* The default stack cannot fail */
586 		panic("Default stack initialization failed");
587 	}
588 }
589 
590 static void
591 tcp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
592     const struct sockaddr *sa, void *ctx)
593 {
594 	struct ip *iph;
595 #ifdef INET6
596 	struct ip6_hdr *ip6;
597 #endif
598 	struct udphdr *uh;
599 	struct tcphdr *th;
600 	int thlen;
601 	uint16_t port;
602 
603 	TCPSTAT_INC(tcps_tunneled_pkts);
604 	if ((m->m_flags & M_PKTHDR) == 0) {
605 		/* Can't handle one that is not a pkt hdr */
606 		TCPSTAT_INC(tcps_tunneled_errs);
607 		goto out;
608 	}
609 	thlen = sizeof(struct tcphdr);
610 	if (m->m_len < off + sizeof(struct udphdr) + thlen &&
611 	    (m = m_pullup(m, off + sizeof(struct udphdr) + thlen)) == NULL) {
612 		TCPSTAT_INC(tcps_tunneled_errs);
613 		goto out;
614 	}
615 	iph = mtod(m, struct ip *);
616 	uh = (struct udphdr *)((caddr_t)iph + off);
617 	th = (struct tcphdr *)(uh + 1);
618 	thlen = th->th_off << 2;
619 	if (m->m_len < off + sizeof(struct udphdr) + thlen) {
620 		m = m_pullup(m, off + sizeof(struct udphdr) + thlen);
621 		if (m == NULL) {
622 			TCPSTAT_INC(tcps_tunneled_errs);
623 			goto out;
624 		} else {
625 			iph = mtod(m, struct ip *);
626 			uh = (struct udphdr *)((caddr_t)iph + off);
627 			th = (struct tcphdr *)(uh + 1);
628 		}
629 	}
630 	m->m_pkthdr.tcp_tun_port = port = uh->uh_sport;
631 	bcopy(th, uh, m->m_len - off);
632 	m->m_len -= sizeof(struct udphdr);
633 	m->m_pkthdr.len -= sizeof(struct udphdr);
634 	/*
635 	 * UDP and TCP use the same ones-complement checksum
636 	 * algorithm, so any checksum state the hardware has
637 	 * already recorded remains valid and tcp_input will
638 	 * skip the software checksum. We therefore do nothing
639 	 * with the flags (m->m_pkthdr.csum_flags).
640 	 */
641 	switch (iph->ip_v) {
642 #ifdef INET
643 	case IPVERSION:
644 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
645 		tcp_input_with_port(&m, &off, IPPROTO_TCP, port);
646 		break;
647 #endif
648 #ifdef INET6
649 	case IPV6_VERSION >> 4:
650 		ip6 = mtod(m, struct ip6_hdr *);
651 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
652 		tcp6_input_with_port(&m, &off, IPPROTO_TCP, port);
653 		break;
654 #endif
655 	default:
656 		goto out;
657 		break;
658 	}
659 	return;
660 out:
661 	m_freem(m);
662 }
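
/*
 * In effect, the function above rewrites the packet in place
 * (illustrative layout, IPv4 case):
 *
 *	before:	| IP | UDP | TCP | payload |
 *	after:	| IP | TCP | payload |
 *
 * i.e. the TCP header and payload are copied down over the UDP header,
 * the IP length is shrunk by sizeof(struct udphdr), and the result is
 * handed to tcp_input_with_port() with the source UDP port recorded in
 * m->m_pkthdr.tcp_tun_port.
 */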
663 
664 static int
665 sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
666 {
667 	int error=ENOENT;
668 	struct tcp_function_set fs;
669 	struct tcp_function_block *blk;
670 
671 	memset(&fs, 0, sizeof(fs));
672 	rw_rlock(&tcp_function_lock);
673 	blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL);
674 	if (blk) {
675 		/* Found him */
676 		strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
677 		fs.pcbcnt = blk->tfb_refcnt;
678 	}
679 	rw_runlock(&tcp_function_lock);
680 	error = sysctl_handle_string(oidp, fs.function_set_name,
681 				     sizeof(fs.function_set_name), req);
682 
683 	/* Check for error or no change */
684 	if (error != 0 || req->newptr == NULL)
685 		return(error);
686 
687 	rw_wlock(&tcp_function_lock);
688 	blk = find_tcp_functions_locked(&fs);
689 	if ((blk == NULL) ||
690 	    (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
691 		error = ENOENT;
692 		goto done;
693 	}
694 	tcp_func_set_ptr = blk;
695 done:
696 	rw_wunlock(&tcp_function_lock);
697 	return (error);
698 }
699 
700 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
701     CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
702     NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
703     "Set/get the default TCP functions");
704 
705 static int
706 sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
707 {
708 	int error, cnt, linesz;
709 	struct tcp_function *f;
710 	char *buffer, *cp;
711 	size_t bufsz, outsz;
712 	bool alias;
713 
714 	cnt = 0;
715 	rw_rlock(&tcp_function_lock);
716 	TAILQ_FOREACH(f, &t_functions, tf_next) {
717 		cnt++;
718 	}
719 	rw_runlock(&tcp_function_lock);
720 
721 	bufsz = (cnt+2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
722 	buffer = malloc(bufsz, M_TEMP, M_WAITOK);
723 
724 	error = 0;
725 	cp = buffer;
726 
727 	linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
728 	    "Alias", "PCB count");
729 	cp += linesz;
730 	bufsz -= linesz;
731 	outsz = linesz;
732 
733 	rw_rlock(&tcp_function_lock);
734 	TAILQ_FOREACH(f, &t_functions, tf_next) {
735 		alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
736 		linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
737 		    f->tf_fb->tfb_tcp_block_name,
738 		    (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ',
739 		    alias ? f->tf_name : "-",
740 		    f->tf_fb->tfb_refcnt);
741 		if (linesz >= bufsz) {
742 			error = EOVERFLOW;
743 			break;
744 		}
745 		cp += linesz;
746 		bufsz -= linesz;
747 		outsz += linesz;
748 	}
749 	rw_runlock(&tcp_function_lock);
750 	if (error == 0)
751 		error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
752 	free(buffer, M_TEMP);
753 	return (error);
754 }
755 
756 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
757     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
758     NULL, 0, sysctl_net_inet_list_available, "A",
759     "list available TCP Function sets");
760 
761 VNET_DEFINE(int, tcp_udp_tunneling_port) = TCP_TUNNELING_PORT_DEFAULT;
762 
763 #ifdef INET
764 VNET_DEFINE(struct socket *, udp4_tun_socket) = NULL;
765 #define	V_udp4_tun_socket	VNET(udp4_tun_socket)
766 #endif
767 #ifdef INET6
768 VNET_DEFINE(struct socket *, udp6_tun_socket) = NULL;
769 #define	V_udp6_tun_socket	VNET(udp6_tun_socket)
770 #endif
771 
772 static void
773 tcp_over_udp_stop(void)
774 {
775 	/*
776 	 * This function assumes the sysctl caller holds the inp info lock
777 	 * for writing!
778 	 */
779 #ifdef INET
780 	if (V_udp4_tun_socket != NULL) {
781 		soclose(V_udp4_tun_socket);
782 		V_udp4_tun_socket = NULL;
783 	}
784 #endif
785 #ifdef INET6
786 	if (V_udp6_tun_socket != NULL) {
787 		soclose(V_udp6_tun_socket);
788 		V_udp6_tun_socket = NULL;
789 	}
790 #endif
791 }
792 
793 static int
794 tcp_over_udp_start(void)
795 {
796 	uint16_t port;
797 	int ret;
798 #ifdef INET
799 	struct sockaddr_in sin;
800 #endif
801 #ifdef INET6
802 	struct sockaddr_in6 sin6;
803 #endif
804 	/*
805 	 * This function assumes the sysctl caller holds the inp info lock
806 	 * for writing!
807 	 */
808 	port = V_tcp_udp_tunneling_port;
809 	if (ntohs(port) == 0) {
810 		/* Must have a port set */
811 		return (EINVAL);
812 	}
813 #ifdef INET
814 	if (V_udp4_tun_socket != NULL) {
815 		/* Already running -- must stop first */
816 		return (EALREADY);
817 	}
818 #endif
819 #ifdef INET6
820 	if (V_udp6_tun_socket != NULL) {
821 		/* Already running -- must stop first */
822 		return (EALREADY);
823 	}
824 #endif
825 #ifdef INET
826 	if ((ret = socreate(PF_INET, &V_udp4_tun_socket,
827 	    SOCK_DGRAM, IPPROTO_UDP,
828 	    curthread->td_ucred, curthread))) {
829 		tcp_over_udp_stop();
830 		return (ret);
831 	}
832 	/* Call the special UDP hook. */
833 	if ((ret = udp_set_kernel_tunneling(V_udp4_tun_socket,
834 	    tcp_recv_udp_tunneled_packet,
835 	    tcp_ctlinput_viaudp,
836 	    NULL))) {
837 		tcp_over_udp_stop();
838 		return (ret);
839 	}
840 	/* Ok, we have a socket, bind it to the port. */
841 	memset(&sin, 0, sizeof(struct sockaddr_in));
842 	sin.sin_len = sizeof(struct sockaddr_in);
843 	sin.sin_family = AF_INET;
844 	sin.sin_port = htons(port);
845 	if ((ret = sobind(V_udp4_tun_socket,
846 	    (struct sockaddr *)&sin, curthread))) {
847 		tcp_over_udp_stop();
848 		return (ret);
849 	}
850 #endif
851 #ifdef INET6
852 	if ((ret = socreate(PF_INET6, &V_udp6_tun_socket,
853 	    SOCK_DGRAM, IPPROTO_UDP,
854 	    curthread->td_ucred, curthread))) {
855 		tcp_over_udp_stop();
856 		return (ret);
857 	}
858 	/* Call the special UDP hook. */
859 	if ((ret = udp_set_kernel_tunneling(V_udp6_tun_socket,
860 	    tcp_recv_udp_tunneled_packet,
861 	    tcp6_ctlinput_viaudp,
862 	    NULL))) {
863 		tcp_over_udp_stop();
864 		return (ret);
865 	}
866 	/* Ok, we have a socket, bind it to the port. */
867 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
868 	sin6.sin6_len = sizeof(struct sockaddr_in6);
869 	sin6.sin6_family = AF_INET6;
870 	sin6.sin6_port = htons(port);
871 	if ((ret = sobind(V_udp6_tun_socket,
872 	    (struct sockaddr *)&sin6, curthread))) {
873 		tcp_over_udp_stop();
874 		return (ret);
875 	}
876 #endif
877 	return (0);
878 }
879 
880 static int
881 sysctl_net_inet_tcp_udp_tunneling_port_check(SYSCTL_HANDLER_ARGS)
882 {
883 	int error;
884 	uint32_t old, new;
885 
886 	old = V_tcp_udp_tunneling_port;
887 	new = old;
888 	error = sysctl_handle_int(oidp, &new, 0, req);
889 	if ((error == 0) &&
890 	    (req->newptr != NULL)) {
891 		if ((new < TCP_TUNNELING_PORT_MIN) ||
892 		    (new > TCP_TUNNELING_PORT_MAX)) {
893 			error = EINVAL;
894 		} else {
895 			V_tcp_udp_tunneling_port = new;
896 			if (old != 0) {
897 				tcp_over_udp_stop();
898 			}
899 			if (new != 0) {
900 				error = tcp_over_udp_start();
901 			}
902 		}
903 	}
904 	return (error);
905 }
906 
907 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_port,
908     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
909     &VNET_NAME(tcp_udp_tunneling_port),
910     0, &sysctl_net_inet_tcp_udp_tunneling_port_check, "IU",
911     "Tunneling port for tcp over udp");
912 
913 VNET_DEFINE(int, tcp_udp_tunneling_overhead) = TCP_TUNNELING_OVERHEAD_DEFAULT;
914 
915 static int
916 sysctl_net_inet_tcp_udp_tunneling_overhead_check(SYSCTL_HANDLER_ARGS)
917 {
918 	int error, new;
919 
920 	new = V_tcp_udp_tunneling_overhead;
921 	error = sysctl_handle_int(oidp, &new, 0, req);
922 	if (error == 0 && req->newptr) {
923 		if ((new < TCP_TUNNELING_OVERHEAD_MIN) ||
924 		    (new > TCP_TUNNELING_OVERHEAD_MAX))
925 			error = EINVAL;
926 		else
927 			V_tcp_udp_tunneling_overhead = new;
928 	}
929 	return (error);
930 }
931 
932 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_overhead,
933     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
934     &VNET_NAME(tcp_udp_tunneling_overhead),
935     0, &sysctl_net_inet_tcp_udp_tunneling_overhead_check, "IU",
936     "MSS reduction when using tcp over udp");
937 
938 /*
939  * Exports one (struct tcp_function_info) for each alias/name.
940  */
941 static int
942 sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
943 {
944 	int cnt, error;
945 	struct tcp_function *f;
946 	struct tcp_function_info tfi;
947 
948 	/*
949 	 * We don't allow writes.
950 	 */
951 	if (req->newptr != NULL)
952 		return (EINVAL);
953 
954 	/*
955 	 * Wire the old buffer so we can directly copy the functions to
956 	 * user space without dropping the lock.
957 	 */
958 	if (req->oldptr != NULL) {
959 		error = sysctl_wire_old_buffer(req, 0);
960 		if (error)
961 			return (error);
962 	}
963 
964 	/*
965 	 * Walk the list and copy out matching entries. If INVARIANTS
966 	 * is compiled in, also walk the list to verify the length of
967 	 * the list matches what we have recorded.
968 	 */
969 	rw_rlock(&tcp_function_lock);
970 
971 	cnt = 0;
972 #ifndef INVARIANTS
973 	if (req->oldptr == NULL) {
974 		cnt = tcp_fb_cnt;
975 		goto skip_loop;
976 	}
977 #endif
978 	TAILQ_FOREACH(f, &t_functions, tf_next) {
979 #ifdef INVARIANTS
980 		cnt++;
981 #endif
982 		if (req->oldptr != NULL) {
983 			bzero(&tfi, sizeof(tfi));
984 			tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
985 			tfi.tfi_id = f->tf_fb->tfb_id;
986 			(void)strlcpy(tfi.tfi_alias, f->tf_name,
987 			    sizeof(tfi.tfi_alias));
988 			(void)strlcpy(tfi.tfi_name,
989 			    f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name));
990 			error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
991 			/*
992 			 * Don't stop on error, as that is the
993 			 * mechanism we use to accumulate length
994 			 * information if the buffer was too short.
995 			 */
996 		}
997 	}
998 	KASSERT(cnt == tcp_fb_cnt,
999 	    ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
1000 #ifndef INVARIANTS
1001 skip_loop:
1002 #endif
1003 	rw_runlock(&tcp_function_lock);
1004 	if (req->oldptr == NULL)
1005 		error = SYSCTL_OUT(req, NULL,
1006 		    (cnt + 1) * sizeof(struct tcp_function_info));
1007 
1008 	return (error);
1009 }
1010 
1011 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
1012 	    CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
1013 	    NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
1014 	    "List TCP function block name-to-ID mappings");
1015 
1016 /*
1017  * tfb_tcp_handoff_ok() function for the default stack.
1018  * Note that we'll basically try to take all comers.
1019  */
1020 static int
1021 tcp_default_handoff_ok(struct tcpcb *tp)
1022 {
1023 
1024 	return (0);
1025 }
1026 
1027 /*
1028  * tfb_tcp_fb_init() function for the default stack.
1029  *
1030  * This handles making sure we have appropriate timers set if you are
1031  * transitioning a socket that has some amount of setup done.
1032  *
1033  * The init() function from the default can *never* return non-zero, i.e.,
1034  * it is required to always succeed since it is the stack of last resort!
1035  */
1036 static int
1037 tcp_default_fb_init(struct tcpcb *tp)
1038 {
1039 
1040 	struct socket *so;
1041 
1042 	INP_WLOCK_ASSERT(tp->t_inpcb);
1043 
1044 	KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
1045 	    ("%s: connection %p in unexpected state %d", __func__, tp,
1046 	    tp->t_state));
1047 
1048 	/*
1049 	 * Nothing to do for ESTABLISHED or LISTEN states. And, we don't
1050 	 * know what to do for unexpected states (which includes TIME_WAIT).
1051 	 */
1052 	if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT)
1053 		return (0);
1054 
1055 	/*
1056 	 * Make sure some kind of transmission timer is set if there is
1057 	 * outstanding data.
1058 	 */
1059 	so = tp->t_inpcb->inp_socket;
1060 	if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
1061 	    tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
1062 	    tcp_timer_active(tp, TT_PERSIST))) {
1063 		/*
1064 		 * If the session is established and it looks like it should
1065 		 * be in the persist state, set the persist timer. Otherwise,
1066 		 * set the retransmit timer.
1067 		 */
1068 		if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 &&
1069 		    (int32_t)(tp->snd_nxt - tp->snd_una) <
1070 		    (int32_t)sbavail(&so->so_snd))
1071 			tcp_setpersist(tp);
1072 		else
1073 			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
1074 	}
1075 
1076 	/* All non-embryonic sessions get a keepalive timer. */
1077 	if (!tcp_timer_active(tp, TT_KEEP))
1078 		tcp_timer_activate(tp, TT_KEEP,
1079 		    TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) :
1080 		    TP_KEEPINIT(tp));
1081 
1082 	/*
1083 	 * Make sure critical variables are initialized
1084 	 * if transitioning while in Recovery.
1085 	 */
1086 	if IN_FASTRECOVERY(tp->t_flags) {
1087 		if (tp->sackhint.recover_fs == 0)
1088 			tp->sackhint.recover_fs = max(1,
1089 			    tp->snd_nxt - tp->snd_una);
1090 	}
1091 
1092 	return (0);
1093 }
1094 
1095 /*
1096  * tfb_tcp_fb_fini() function for the default stack.
1097  *
1098  * This changes state as necessary (or prudent) to prepare for another stack
1099  * to assume responsibility for the connection.
1100  */
1101 static void
1102 tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
1103 {
1104 
1105 	INP_WLOCK_ASSERT(tp->t_inpcb);
1106 	return;
1107 }
1108 
1109 /*
1110  * Target size of TCP PCB hash tables. Must be a power of two.
1111  *
1112  * Note that this can be overridden by the kernel environment
1113  * variable net.inet.tcp.tcbhashsize
1114  */
1115 #ifndef TCBHASHSIZE
1116 #define TCBHASHSIZE	0
1117 #endif
1118 
1119 /*
1120  * XXX
1121  * Callouts should be moved into struct tcp directly.  They are currently
1122  * separate because the tcpcb structure is exported to userland for sysctl
1123  * parsing purposes, and the userland parsers do not know about callouts.
1124  */
1125 struct tcpcb_mem {
1126 	struct	tcpcb		tcb;
1127 	struct	tcp_timer	tt;
1128 	struct	cc_var		ccv;
1129 #ifdef TCP_HHOOK
1130 	struct	osd		osd;
1131 #endif
1132 };
1133 
1134 VNET_DEFINE_STATIC(uma_zone_t, tcpcb_zone);
1135 #define	V_tcpcb_zone			VNET(tcpcb_zone)
1136 
1137 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
1138 MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");
1139 
1140 static struct mtx isn_mtx;
1141 
1142 #define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
1143 #define	ISN_LOCK()	mtx_lock(&isn_mtx)
1144 #define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)
1145 
1146 /*
1147  * TCP initialization.
1148  */
1149 static void
1150 tcp_zone_change(void *tag)
1151 {
1152 
1153 	uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
1154 	uma_zone_set_max(V_tcpcb_zone, maxsockets);
1155 	tcp_tw_zone_change();
1156 }
1157 
1158 static int
1159 tcp_inpcb_init(void *mem, int size, int flags)
1160 {
1161 	struct inpcb *inp = mem;
1162 
1163 	INP_LOCK_INIT(inp, "inp", "tcpinp");
1164 	return (0);
1165 }
1166 
1167 /*
1168  * Take a value and get the next power of 2 that doesn't overflow.
1169  * Used to size the tcp_inpcb hash buckets.
1170  */
1171 static int
1172 maketcp_hashsize(int size)
1173 {
1174 	int hashsize;
1175 
1176 	/*
1177 	 * Auto tune: get the next power of 2 at or above the
1178 	 * requested size.
1179 	 */
1180 	hashsize = 1 << fls(size);
1181 	/* catch overflow, and just go one power of 2 smaller */
1182 	if (hashsize < size) {
1183 		hashsize = 1 << (fls(size) - 1);
1184 	}
1185 	return (hashsize);
1186 }
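
/*
 * For example, maketcp_hashsize(600) computes 1 << fls(600) =
 * 1 << 10 = 1024; only when 1 << fls(size) would overflow to a value
 * below size does it back off one power of two.
 */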
1187 
1188 static volatile int next_tcp_stack_id = 1;
1189 
1190 /*
1191  * Register a TCP function block with the name provided in the names
1192  * array.  (Note that this function does NOT automatically register
1193  * blk->tfb_tcp_block_name as a stack name.  Therefore, you should
1194  * explicitly include blk->tfb_tcp_block_name in the list of names if
1195  * you wish to register the stack with that name.)
1196  *
1197  * Either all name registrations will succeed or all will fail.  If
1198  * a name registration fails, the function will update the num_names
1199  * argument to point to the array index of the name that encountered
1200  * the failure.
1201  *
1202  * Returns 0 on success, or an error code on failure.
1203  */
1204 int
1205 register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
1206     const char *names[], int *num_names)
1207 {
1208 	struct tcp_function *n;
1209 	struct tcp_function_set fs;
1210 	int error, i;
1211 
1212 	KASSERT(names != NULL && *num_names > 0,
1213 	    ("%s: Called with 0-length name list", __func__));
1214 	KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));
1215 	KASSERT(rw_initialized(&tcp_function_lock),
1216 	    ("%s: called too early", __func__));
1217 
1218 	if ((blk->tfb_tcp_output == NULL) ||
1219 	    (blk->tfb_tcp_do_segment == NULL) ||
1220 	    (blk->tfb_tcp_ctloutput == NULL) ||
1221 	    (strlen(blk->tfb_tcp_block_name) == 0)) {
1222 		/*
1223 		 * These functions are required and you
1224 		 * need a name.
1225 		 */
1226 		*num_names = 0;
1227 		return (EINVAL);
1228 	}
1229 	if (blk->tfb_tcp_timer_stop_all ||
1230 	    blk->tfb_tcp_timer_activate ||
1231 	    blk->tfb_tcp_timer_active ||
1232 	    blk->tfb_tcp_timer_stop) {
1233 		/*
1234 		 * If you define one timer function you
1235 		 * must have them all.
1236 		 */
1237 		if ((blk->tfb_tcp_timer_stop_all == NULL) ||
1238 		    (blk->tfb_tcp_timer_activate == NULL) ||
1239 		    (blk->tfb_tcp_timer_active == NULL) ||
1240 		    (blk->tfb_tcp_timer_stop == NULL)) {
1241 			*num_names = 0;
1242 			return (EINVAL);
1243 		}
1244 	}
1245 
1246 	if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
1247 		*num_names = 0;
1248 		return (EINVAL);
1249 	}
1250 
1251 	refcount_init(&blk->tfb_refcnt, 0);
1252 	blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
1253 	for (i = 0; i < *num_names; i++) {
1254 		n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
1255 		if (n == NULL) {
1256 			error = ENOMEM;
1257 			goto cleanup;
1258 		}
1259 		n->tf_fb = blk;
1260 
1261 		(void)strlcpy(fs.function_set_name, names[i],
1262 		    sizeof(fs.function_set_name));
1263 		rw_wlock(&tcp_function_lock);
1264 		if (find_tcp_functions_locked(&fs) != NULL) {
1265 			/* Duplicate name space not allowed */
1266 			rw_wunlock(&tcp_function_lock);
1267 			free(n, M_TCPFUNCTIONS);
1268 			error = EALREADY;
1269 			goto cleanup;
1270 		}
1271 		(void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name));
1272 		TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
1273 		tcp_fb_cnt++;
1274 		rw_wunlock(&tcp_function_lock);
1275 	}
1276 	return(0);
1277 
1278 cleanup:
1279 	/*
1280 	 * Deregister the names we just added. Because registration failed
1281 	 * for names[i], we don't need to deregister that name.
1282 	 */
1283 	*num_names = i;
1284 	rw_wlock(&tcp_function_lock);
1285 	while (--i >= 0) {
1286 		TAILQ_FOREACH(n, &t_functions, tf_next) {
1287 			if (!strncmp(n->tf_name, names[i],
1288 			    TCP_FUNCTION_NAME_LEN_MAX)) {
1289 				TAILQ_REMOVE(&t_functions, n, tf_next);
1290 				tcp_fb_cnt--;
1291 				n->tf_fb = NULL;
1292 				free(n, M_TCPFUNCTIONS);
1293 				break;
1294 			}
1295 		}
1296 	}
1297 	rw_wunlock(&tcp_function_lock);
1298 	return (error);
1299 }
1300 
1301 /*
1302  * Register a TCP function block using the name provided in the name
1303  * argument.
1304  *
1305  * Returns 0 on success, or an error code on failure.
1306  */
1307 int
1308 register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
1309     int wait)
1310 {
1311 	const char *name_list[1];
1312 	int num_names, rv;
1313 
1314 	num_names = 1;
1315 	if (name != NULL)
1316 		name_list[0] = name;
1317 	else
1318 		name_list[0] = blk->tfb_tcp_block_name;
1319 	rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
1320 	return (rv);
1321 }
1322 
1323 /*
1324  * Register a TCP function block using the name defined in
1325  * blk->tfb_tcp_block_name.
1326  *
1327  * Returns 0 on success, or an error code on failure.
1328  */
1329 int
1330 register_tcp_functions(struct tcp_function_block *blk, int wait)
1331 {
1332 
1333 	return (register_tcp_functions_as_name(blk, NULL, wait));
1334 }
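
/*
 * An illustrative sketch of how a loadable stack module might use this
 * interface (all "example_*" names are hypothetical):
 *
 *	static struct tcp_function_block example_funcblk = {
 *		.tfb_tcp_block_name = "example",
 *		.tfb_tcp_output = example_output,
 *		.tfb_tcp_do_segment = example_do_segment,
 *		.tfb_tcp_ctloutput = example_ctloutput,
 *	};
 *
 *	error = register_tcp_functions(&example_funcblk, M_WAITOK);
 *
 * tfb_tcp_output, tfb_tcp_do_segment, and tfb_tcp_ctloutput are
 * mandatory (see the checks in register_tcp_functions_as_names()), and
 * a later deregister_tcp_functions(&example_funcblk, false, false)
 * undoes the registration.
 */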
1335 
1336 /*
1337  * Deregister all names associated with a function block. This
1338  * functionally removes the function block from use within the system.
1339  *
1340  * When called with a true quiesce argument, mark the function block
1341  * as being removed so no more stacks will use it and determine
1342  * whether the removal would succeed.
1343  *
1344  * When called with a false quiesce argument, actually attempt the
1345  * removal.
1346  *
1347  * When called with a force argument, attempt to switch all TCBs to
1348  * use the default stack instead of returning EBUSY.
1349  *
1350  * Returns 0 on success (or if the removal would succeed), or an error
1351  * code on failure.
1352  */
1353 int
1354 deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce,
1355     bool force)
1356 {
1357 	struct tcp_function *f;
1358 
1359 	if (blk == &tcp_def_funcblk) {
1360 		/* You can't un-register the default */
1361 		return (EPERM);
1362 	}
1363 	rw_wlock(&tcp_function_lock);
1364 	if (blk == tcp_func_set_ptr) {
1365 		/* You can't free the current default */
1366 		rw_wunlock(&tcp_function_lock);
1367 		return (EBUSY);
1368 	}
1369 	/* Mark the block so no more stacks can use it. */
1370 	blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
1371 	/*
1372 	 * If TCBs are still attached to the stack, attempt to switch them
1373 	 * to the default stack.
1374 	 */
1375 	if (force && blk->tfb_refcnt) {
1376 		struct inpcb *inp;
1377 		struct tcpcb *tp;
1378 		VNET_ITERATOR_DECL(vnet_iter);
1379 
1380 		rw_wunlock(&tcp_function_lock);
1381 
1382 		VNET_LIST_RLOCK();
1383 		VNET_FOREACH(vnet_iter) {
1384 			CURVNET_SET(vnet_iter);
1385 			INP_INFO_WLOCK(&V_tcbinfo);
1386 			CK_LIST_FOREACH(inp, V_tcbinfo.ipi_listhead, inp_list) {
1387 				INP_WLOCK(inp);
1388 				if (inp->inp_flags & INP_TIMEWAIT) {
1389 					INP_WUNLOCK(inp);
1390 					continue;
1391 				}
1392 				tp = intotcpcb(inp);
1393 				if (tp == NULL || tp->t_fb != blk) {
1394 					INP_WUNLOCK(inp);
1395 					continue;
1396 				}
1397 				tcp_switch_back_to_default(tp);
1398 				INP_WUNLOCK(inp);
1399 			}
1400 			INP_INFO_WUNLOCK(&V_tcbinfo);
1401 			CURVNET_RESTORE();
1402 		}
1403 		VNET_LIST_RUNLOCK();
1404 
1405 		rw_wlock(&tcp_function_lock);
1406 	}
1407 	if (blk->tfb_refcnt) {
1408 		/* TCBs still attached. */
1409 		rw_wunlock(&tcp_function_lock);
1410 		return (EBUSY);
1411 	}
1412 	if (quiesce) {
1413 		/* Skip removal. */
1414 		rw_wunlock(&tcp_function_lock);
1415 		return (0);
1416 	}
1417 	/* Remove any function names that map to this function block. */
1418 	while (find_tcp_fb_locked(blk, &f) != NULL) {
1419 		TAILQ_REMOVE(&t_functions, f, tf_next);
1420 		tcp_fb_cnt--;
1421 		f->tf_fb = NULL;
1422 		free(f, M_TCPFUNCTIONS);
1423 	}
1424 	rw_wunlock(&tcp_function_lock);
1425 	return (0);
1426 }
1427 
1428 void
1429 tcp_init(void)
1430 {
1431 	const char *tcbhash_tuneable;
1432 	int hashsize;
1433 
1434 	tcbhash_tuneable = "net.inet.tcp.tcbhashsize";
1435 
1436 #ifdef TCP_HHOOK
1437 	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
1438 	    &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1439 		printf("%s: WARNING: unable to register helper hook\n", __func__);
1440 	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
1441 	    &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1442 		printf("%s: WARNING: unable to register helper hook\n", __func__);
1443 #endif
1444 #ifdef STATS
1445 	if (tcp_stats_init())
1446 		printf("%s: WARNING: unable to initialise TCP stats\n",
1447 		    __func__);
1448 #endif
1449 	hashsize = TCBHASHSIZE;
1450 	TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
1451 	if (hashsize == 0) {
1452 		/*
1453 		 * Auto tune the hash size based on maxsockets.
1454 		 * A perfect hash would have a 1:1 mapping
1455 		 * (hashsize = maxsockets), but it's been suggested
1456 		 * that an average of two entries per bucket is better.
1457 		 */
1458 		hashsize = maketcp_hashsize(maxsockets / 4);
1459 		/*
1460 		 * Our historical default is 512,
1461 		 * do not autotune lower than this.
1462 		 */
1463 		if (hashsize < 512)
1464 			hashsize = 512;
1465 		if (bootverbose && IS_DEFAULT_VNET(curvnet))
1466 			printf("%s: %s auto tuned to %d\n", __func__,
1467 			    tcbhash_tuneable, hashsize);
1468 	}
1469 	/*
1470 	 * We require a hashsize to be a power of two.
1471 	 * Previously if it was not a power of two we would just reset it
1472 	 * back to 512, which could be a nasty surprise if you did not notice
1473 	 * the error message.
1474 	 * Instead what we do is round it up to the next power of two
1475 	 * at or above the specified hash value.
1476 	 */
1477 	if (!powerof2(hashsize)) {
1478 		int oldhashsize = hashsize;
1479 
1480 		hashsize = maketcp_hashsize(hashsize);
1481 		/* prevent absurdly low value */
1482 		if (hashsize < 16)
1483 			hashsize = 16;
1484 		printf("%s: WARNING: TCB hash size not a power of 2, "
1485 		    "clipped from %d to %d.\n", __func__, oldhashsize,
1486 		    hashsize);
1487 	}
1488 	in_pcbinfo_init(&V_tcbinfo, "tcp", &V_tcb, hashsize, hashsize,
1489 	    "tcp_inpcb", tcp_inpcb_init, IPI_HASHFIELDS_4TUPLE);
1490 
1491 	/*
1492 	 * These have to be type stable for the benefit of the timers.
1493 	 */
1494 	V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
1495 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1496 	uma_zone_set_max(V_tcpcb_zone, maxsockets);
1497 	uma_zone_set_warning(V_tcpcb_zone, "kern.ipc.maxsockets limit reached");
1498 
1499 	tcp_tw_init();
1500 	syncache_init();
1501 	tcp_hc_init();
1502 
1503 	TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
1504 	V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
1505 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1506 
1507 	tcp_fastopen_init();
1508 
1509 	/* Skip initialization of globals for non-default instances. */
1510 	if (!IS_DEFAULT_VNET(curvnet))
1511 		return;
1512 
1513 	tcp_reass_global_init();
1514 
1515 	/* XXX virtualize those below? */
1516 	tcp_delacktime = TCPTV_DELACK;
1517 	tcp_keepinit = TCPTV_KEEP_INIT;
1518 	tcp_keepidle = TCPTV_KEEP_IDLE;
1519 	tcp_keepintvl = TCPTV_KEEPINTVL;
1520 	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
1521 	tcp_msl = TCPTV_MSL;
1522 	tcp_rexmit_initial = TCPTV_RTOBASE;
1523 	if (tcp_rexmit_initial < 1)
1524 		tcp_rexmit_initial = 1;
1525 	tcp_rexmit_min = TCPTV_MIN;
1526 	if (tcp_rexmit_min < 1)
1527 		tcp_rexmit_min = 1;
1528 	tcp_persmin = TCPTV_PERSMIN;
1529 	tcp_persmax = TCPTV_PERSMAX;
1530 	tcp_rexmit_slop = TCPTV_CPU_VAR;
1531 	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
1532 	tcp_tcbhashsize = hashsize;
1533 
1534 	/* Setup the tcp function block list */
1535 	TAILQ_INIT(&t_functions);
1536 	rw_init(&tcp_function_lock, "tcp_func_lock");
1537 	register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
1538 #ifdef TCP_BLACKBOX
1539 	/* Initialize the TCP logging data. */
1540 	tcp_log_init();
1541 #endif
1542 	arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0);
1543 
1544 	if (tcp_soreceive_stream) {
1545 #ifdef INET
1546 		tcp_usrreqs.pru_soreceive = soreceive_stream;
1547 #endif
1548 #ifdef INET6
1549 		tcp6_usrreqs.pru_soreceive = soreceive_stream;
1550 #endif /* INET6 */
1551 	}
1552 
1553 #ifdef INET6
1554 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
1555 #else /* INET6 */
1556 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
1557 #endif /* INET6 */
1558 	if (max_protohdr < TCP_MINPROTOHDR)
1559 		max_protohdr = TCP_MINPROTOHDR;
1560 	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
1561 		panic("tcp_init");
1562 #undef TCP_MINPROTOHDR
1563 
1564 	ISN_LOCK_INIT();
1565 	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
1566 		SHUTDOWN_PRI_DEFAULT);
1567 	EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
1568 		EVENTHANDLER_PRI_ANY);
1569 
1570 	tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
1571 	tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
1572 	tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
1573 	tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
1574 	tcp_extra_mbuf = counter_u64_alloc(M_WAITOK);
1575 	tcp_would_have_but = counter_u64_alloc(M_WAITOK);
1576 	tcp_comp_total = counter_u64_alloc(M_WAITOK);
1577 	tcp_uncomp_total = counter_u64_alloc(M_WAITOK);
1578 	tcp_bad_csums = counter_u64_alloc(M_WAITOK);
1579 #ifdef TCPPCAP
1580 	tcp_pcap_init();
1581 #endif
1582 }
1583 
1584 #ifdef VIMAGE
1585 static void
1586 tcp_destroy(void *unused __unused)
1587 {
1588 	int n;
1589 #ifdef TCP_HHOOK
1590 	int error;
1591 #endif
1592 
1593 	/*
1594 	 * All our processes are gone, all our sockets should be cleaned
1595  * up, which means we should be past the tcp_discardcb() calls.
1596  * Sleep to let all tcpcb timers really disappear and clean up.
1597 	 */
1598 	for (;;) {
1599 		INP_LIST_RLOCK(&V_tcbinfo);
1600 		n = V_tcbinfo.ipi_count;
1601 		INP_LIST_RUNLOCK(&V_tcbinfo);
1602 		if (n == 0)
1603 			break;
1604 		pause("tcpdes", hz / 10);
1605 	}
1606 	tcp_hc_destroy();
1607 	syncache_destroy();
1608 	tcp_tw_destroy();
1609 	in_pcbinfo_destroy(&V_tcbinfo);
1610 	/* tcp_discardcb() cleans up the sack holes. */
1611 	uma_zdestroy(V_sack_hole_zone);
1612 	uma_zdestroy(V_tcpcb_zone);
1613 
1614 	/*
1615 	 * Cannot free the zone until all tcpcbs are released as we attach
1616 	 * the allocations to them.
1617 	 */
1618 	tcp_fastopen_destroy();
1619 
1620 #ifdef TCP_HHOOK
1621 	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
1622 	if (error != 0) {
1623 		printf("%s: WARNING: unable to deregister helper hook "
1624 		    "type=%d, id=%d: error %d returned\n", __func__,
1625 		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
1626 	}
1627 	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
1628 	if (error != 0) {
1629 		printf("%s: WARNING: unable to deregister helper hook "
1630 		    "type=%d, id=%d: error %d returned\n", __func__,
1631 		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
1632 	}
1633 #endif
1634 }
1635 VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
1636 #endif
1637 
1638 void
1639 tcp_fini(void *xtp)
1640 {
1641 
1642 }
1643 
1644 /*
1645  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
1646  * tcp_template used to store this data in mbufs, but we now recopy it out
1647  * of the tcpcb each time to conserve mbufs.
1648  */
1649 void
1650 tcpip_fillheaders(struct inpcb *inp, uint16_t port, void *ip_ptr, void *tcp_ptr)
1651 {
1652 	struct tcphdr *th = (struct tcphdr *)tcp_ptr;
1653 
1654 	INP_WLOCK_ASSERT(inp);
1655 
1656 #ifdef INET6
1657 	if ((inp->inp_vflag & INP_IPV6) != 0) {
1658 		struct ip6_hdr *ip6;
1659 
1660 		ip6 = (struct ip6_hdr *)ip_ptr;
1661 		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
1662 			(inp->inp_flow & IPV6_FLOWINFO_MASK);
1663 		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
1664 			(IPV6_VERSION & IPV6_VERSION_MASK);
1665 		if (port == 0)
1666 			ip6->ip6_nxt = IPPROTO_TCP;
1667 		else
1668 			ip6->ip6_nxt = IPPROTO_UDP;
1669 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
1670 		ip6->ip6_src = inp->in6p_laddr;
1671 		ip6->ip6_dst = inp->in6p_faddr;
1672 	}
1673 #endif /* INET6 */
1674 #if defined(INET6) && defined(INET)
1675 	else
1676 #endif
1677 #ifdef INET
1678 	{
1679 		struct ip *ip;
1680 
1681 		ip = (struct ip *)ip_ptr;
1682 		ip->ip_v = IPVERSION;
1683 		ip->ip_hl = 5;
1684 		ip->ip_tos = inp->inp_ip_tos;
1685 		ip->ip_len = 0;
1686 		ip->ip_id = 0;
1687 		ip->ip_off = 0;
1688 		ip->ip_ttl = inp->inp_ip_ttl;
1689 		ip->ip_sum = 0;
1690 		if (port == 0)
1691 			ip->ip_p = IPPROTO_TCP;
1692 		else
1693 			ip->ip_p = IPPROTO_UDP;
1694 		ip->ip_src = inp->inp_laddr;
1695 		ip->ip_dst = inp->inp_faddr;
1696 	}
1697 #endif /* INET */
1698 	th->th_sport = inp->inp_lport;
1699 	th->th_dport = inp->inp_fport;
1700 	th->th_seq = 0;
1701 	th->th_ack = 0;
1702 	th->th_x2 = 0;
1703 	th->th_off = 5;
1704 	th->th_flags = 0;
1705 	th->th_win = 0;
1706 	th->th_urp = 0;
1707 	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
1708 }
1709 
1710 /*
1711  * Create template to be used to send tcp packets on a connection.
1712  * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
1713  * use for this function is in keepalives, which use tcp_respond.
1714  */
1715 struct tcptemp *
1716 tcpip_maketemplate(struct inpcb *inp)
1717 {
1718 	struct tcptemp *t;
1719 
1720 	t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
1721 	if (t == NULL)
1722 		return (NULL);
1723 	tcpip_fillheaders(inp, 0, (void *)&t->tt_ipgen, (void *)&t->tt_t);
1724 	return (t);
1725 }
1726 
1727 /*
1728  * Send a single message to the TCP at address specified by
1729  * the given TCP/IP header.  If m == NULL, then we make a copy
1730  * of the tcpiphdr at th and send directly to the addressed host.
1731  * This is used to force keep alive messages out using the TCP
1732  * template for a connection.  If flags are given then we send
1733  * a message back to the TCP which originated the segment th,
1734  * and discard the mbuf containing it and any other attached mbufs.
1735  *
1736  * In any case the ack and sequence number of the transmitted
1737  * segment are as specified by the parameters.
1738  *
1739  * NOTE: If m != NULL, then th must point to *inside* the mbuf.
1740  */
1741 void
1742 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
1743     tcp_seq ack, tcp_seq seq, int flags)
1744 {
1745 	struct tcpopt to;
1746 	struct inpcb *inp;
1747 	struct ip *ip;
1748 	struct mbuf *optm;
1749 	struct udphdr *uh = NULL;
1750 	struct tcphdr *nth;
1751 	struct tcp_log_buffer *lgb;
1752 	u_char *optp;
1753 #ifdef INET6
1754 	struct ip6_hdr *ip6;
1755 	int isipv6;
1756 #endif /* INET6 */
1757 	int optlen, tlen, win, ulen;
1758 	bool incl_opts, lock_upgraded;
1759 	uint16_t port;
1760 	int output_ret;
1761 
1762 	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
1763 	NET_EPOCH_ASSERT();
1764 
1765 #ifdef INET6
1766 	isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
1767 	ip6 = ipgen;
1768 #endif /* INET6 */
1769 	ip = ipgen;
1770 
1771 	if (tp != NULL) {
1772 		inp = tp->t_inpcb;
1773 		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
1774 		INP_LOCK_ASSERT(inp);
1775 	} else
1776 		inp = NULL;
1777 
1778 	if (m != NULL) {
1779 #ifdef INET6
1780 		if (isipv6 && ip6 && (ip6->ip6_nxt == IPPROTO_UDP))
1781 			port = m->m_pkthdr.tcp_tun_port;
1782 		else
1783 #endif
1784 		if (ip && (ip->ip_p == IPPROTO_UDP))
1785 			port = m->m_pkthdr.tcp_tun_port;
1786 		else
1787 			port = 0;
1788 	} else
1789 		port = tp->t_port;
1790 
1791 	incl_opts = false;
1792 	win = 0;
1793 	if (tp != NULL) {
1794 		if (!(flags & TH_RST)) {
1795 			win = sbspace(&inp->inp_socket->so_rcv);
1796 			if (win > TCP_MAXWIN << tp->rcv_scale)
1797 				win = TCP_MAXWIN << tp->rcv_scale;
1798 		}
1799 		if ((tp->t_flags & TF_NOOPT) == 0)
1800 			incl_opts = true;
1801 	}
1802 	if (m == NULL) {
1803 		m = m_gethdr(M_NOWAIT, MT_DATA);
1804 		if (m == NULL)
1805 			return;
1806 		m->m_data += max_linkhdr;
1807 #ifdef INET6
1808 		if (isipv6) {
1809 			bcopy((caddr_t)ip6, mtod(m, caddr_t),
1810 			      sizeof(struct ip6_hdr));
1811 			ip6 = mtod(m, struct ip6_hdr *);
1812 			nth = (struct tcphdr *)(ip6 + 1);
1813 			if (port) {
1814 				/* Insert a UDP header */
1815 				uh = (struct udphdr *)nth;
1816 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1817 				uh->uh_dport = port;
1818 				nth = (struct tcphdr *)(uh + 1);
1819 			}
1820 		} else
1821 #endif /* INET6 */
1822 		{
1823 			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
1824 			ip = mtod(m, struct ip *);
1825 			nth = (struct tcphdr *)(ip + 1);
1826 			if (port) {
1827 				/* Insert a UDP header */
1828 				uh = (struct udphdr *)nth;
1829 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1830 				uh->uh_dport = port;
1831 				nth = (struct tcphdr *)(uh + 1);
1832 			}
1833 		}
1834 		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1835 		flags = TH_ACK;
1836 	} else if ((!M_WRITABLE(m)) || (port != 0)) {
1837 		struct mbuf *n;
1838 
1839 		/* Can't reuse 'm', allocate a new mbuf. */
1840 		n = m_gethdr(M_NOWAIT, MT_DATA);
1841 		if (n == NULL) {
1842 			m_freem(m);
1843 			return;
1844 		}
1845 
1846 		if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
1847 			m_freem(m);
1848 			m_freem(n);
1849 			return;
1850 		}
1851 
1852 		n->m_data += max_linkhdr;
1853 		/* m_len is set later */
1854 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
1855 #ifdef INET6
1856 		if (isipv6) {
1857 			bcopy((caddr_t)ip6, mtod(n, caddr_t),
1858 			      sizeof(struct ip6_hdr));
1859 			ip6 = mtod(n, struct ip6_hdr *);
1860 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1861 			nth = (struct tcphdr *)(ip6 + 1);
1862 			if (port) {
1863 				/* Insert a UDP header */
1864 				uh = (struct udphdr *)nth;
1865 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1866 				uh->uh_dport = port;
1867 				nth = (struct tcphdr *)(uh + 1);
1868 			}
1869 		} else
1870 #endif /* INET6 */
1871 		{
1872 			bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip));
1873 			ip = mtod(n, struct ip *);
1874 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1875 			nth = (struct tcphdr *)(ip + 1);
1876 			if (port) {
1877 				/* Insert a UDP header */
1878 				uh = (struct udphdr *)nth;
1879 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1880 				uh->uh_dport = port;
1881 				nth = (struct tcphdr *)(uh + 1);
1882 			}
1883 		}
1884 		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1885 		xchg(nth->th_dport, nth->th_sport, uint16_t);
1886 		th = nth;
1887 		m_freem(m);
1888 		m = n;
1889 	} else {
1890 		/*
1891 		 * Reuse the mbuf.
1892 		 * XXX MRT We inherit the FIB, which is lucky.
1893 		 */
1894 		m_freem(m->m_next);
1895 		m->m_next = NULL;
1896 		m->m_data = (caddr_t)ipgen;
1897 		/* m_len is set later */
1898 #ifdef INET6
1899 		if (isipv6) {
1900 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1901 			nth = (struct tcphdr *)(ip6 + 1);
1902 		} else
1903 #endif /* INET6 */
1904 		{
1905 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1906 			nth = (struct tcphdr *)(ip + 1);
1907 		}
1908 		if (th != nth) {
1909 			/*
1910 			 * This usually happens when an extension header
1911 			 * exists between the IPv6 header and the
1912 			 * TCP header.
1913 			 */
1914 			nth->th_sport = th->th_sport;
1915 			nth->th_dport = th->th_dport;
1916 		}
1917 		xchg(nth->th_dport, nth->th_sport, uint16_t);
1918 #undef xchg
1919 	}
1920 	tlen = 0;
1921 #ifdef INET6
1922 	if (isipv6)
1923 		tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
1924 #endif
1925 #if defined(INET) && defined(INET6)
1926 	else
1927 #endif
1928 #ifdef INET
1929 		tlen = sizeof (struct tcpiphdr);
1930 #endif
1931 	if (port)
1932 		tlen += sizeof (struct udphdr);
1933 #ifdef INVARIANTS
1934 	m->m_len = 0;
1935 	KASSERT(M_TRAILINGSPACE(m) >= tlen,
1936 	    ("Not enough trailing space for message (m=%p, need=%d, have=%ld)",
1937 	    m, tlen, (long)M_TRAILINGSPACE(m)));
1938 #endif
1939 	m->m_len = tlen;
1940 	to.to_flags = 0;
1941 	if (incl_opts) {
1942 		/* Make sure we have room. */
1943 		if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) {
1944 			m->m_next = m_get(M_NOWAIT, MT_DATA);
1945 			if (m->m_next) {
1946 				optp = mtod(m->m_next, u_char *);
1947 				optm = m->m_next;
1948 			} else
1949 				incl_opts = false;
1950 		} else {
1951 			optp = (u_char *) (nth + 1);
1952 			optm = m;
1953 		}
1954 	}
1955 	if (incl_opts) {
1956 		/* Timestamps. */
1957 		if (tp->t_flags & TF_RCVD_TSTMP) {
1958 			to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
1959 			to.to_tsecr = tp->ts_recent;
1960 			to.to_flags |= TOF_TS;
1961 		}
1962 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1963 		/* TCP-MD5 (RFC2385). */
1964 		if (tp->t_flags & TF_SIGNATURE)
1965 			to.to_flags |= TOF_SIGNATURE;
1966 #endif
1967 		/* Add the options. */
1968 		tlen += optlen = tcp_addoptions(&to, optp);
1969 
1970 		/* Update m_len in the correct mbuf. */
1971 		optm->m_len += optlen;
1972 	} else
1973 		optlen = 0;
1974 #ifdef INET6
1975 	if (isipv6) {
1976 		if (uh) {
1977 			ulen = tlen - sizeof(struct ip6_hdr);
1978 			uh->uh_ulen = htons(ulen);
1979 		}
1980 		ip6->ip6_flow = 0;
1981 		ip6->ip6_vfc = IPV6_VERSION;
1982 		if (port)
1983 			ip6->ip6_nxt = IPPROTO_UDP;
1984 		else
1985 			ip6->ip6_nxt = IPPROTO_TCP;
1986 		ip6->ip6_plen = htons(tlen - sizeof(*ip6));
1987 	}
1988 #endif
1989 #if defined(INET) && defined(INET6)
1990 	else
1991 #endif
1992 #ifdef INET
1993 	{
1994 		if (uh) {
1995 			ulen = tlen - sizeof(struct ip);
1996 			uh->uh_ulen = htons(ulen);
1997 		}
1998 		ip->ip_len = htons(tlen);
1999 		ip->ip_ttl = V_ip_defttl;
2000 		if (port) {
2001 			ip->ip_p = IPPROTO_UDP;
2002 		} else {
2003 			ip->ip_p = IPPROTO_TCP;
2004 		}
2005 		if (V_path_mtu_discovery)
2006 			ip->ip_off |= htons(IP_DF);
2007 	}
2008 #endif
2009 	m->m_pkthdr.len = tlen;
2010 	m->m_pkthdr.rcvif = NULL;
2011 #ifdef MAC
2012 	if (inp != NULL) {
2013 		/*
2014 		 * Packet is associated with a socket, so allow the
2015 		 * label of the response to reflect the socket label.
2016 		 */
2017 		INP_LOCK_ASSERT(inp);
2018 		mac_inpcb_create_mbuf(inp, m);
2019 	} else {
2020 		/*
2021 		 * Packet is not associated with a socket, so possibly
2022 		 * update the label in place.
2023 		 */
2024 		mac_netinet_tcp_reply(m);
2025 	}
2026 #endif
2027 	nth->th_seq = htonl(seq);
2028 	nth->th_ack = htonl(ack);
2029 	nth->th_x2 = 0;
2030 	nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
2031 	nth->th_flags = flags;
2032 	if (tp != NULL)
2033 		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
2034 	else
2035 		nth->th_win = htons((u_short)win);
2036 	nth->th_urp = 0;
2037 
2038 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
2039 	if (to.to_flags & TOF_SIGNATURE) {
2040 		if (!TCPMD5_ENABLED() ||
2041 		    TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) {
2042 			m_freem(m);
2043 			return;
2044 		}
2045 	}
2046 #endif
2047 
2048 #ifdef INET6
2049 	if (isipv6) {
2050 		if (port) {
2051 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2052 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2053 			uh->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
2054 			nth->th_sum = 0;
2055 		} else {
2056 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
2057 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2058 			nth->th_sum = in6_cksum_pseudo(ip6,
2059 			    tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
2060 		}
2061 		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
2062 		    NULL, NULL);
2063 	}
2064 #endif /* INET6 */
2065 #if defined(INET6) && defined(INET)
2066 	else
2067 #endif
2068 #ifdef INET
2069 	{
2070 		if (port) {
2071 			uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2072 			    htons(ulen + IPPROTO_UDP));
2073 			m->m_pkthdr.csum_flags = CSUM_UDP;
2074 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2075 			nth->th_sum = 0;
2076 		} else {
2077 			m->m_pkthdr.csum_flags = CSUM_TCP;
2078 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2079 			nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2080 			    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
2081 		}
2082 	}
2083 #endif /* INET */
2084 #ifdef TCPDEBUG
2085 	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
2086 		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
2087 #endif
2088 	TCP_PROBE3(debug__output, tp, th, m);
2089 	if (flags & TH_RST)
2090 		TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth);
2091 	lock_upgraded = false;
2092 	lgb = NULL;
2093 	if ((tp != NULL) && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2094 		union tcp_log_stackspecific log;
2095 		struct timeval tv;
2096 
2097 		lock_upgraded = !INP_WLOCKED(inp) && INP_TRY_UPGRADE(inp);
2098 		/*
2099 		 * If we don't already own the write lock and can't upgrade,
2100 		 * just don't log the event, but still send the response.
2101 		 */
2102 		if (INP_WLOCKED(inp)) {
2103 			memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2104 			log.u_bbr.inhpts = tp->t_inpcb->inp_in_hpts;
2105 			log.u_bbr.ininput = tp->t_inpcb->inp_in_input;
2106 			log.u_bbr.flex8 = 4;
2107 			log.u_bbr.pkts_out = tp->t_maxseg;
2108 			log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2109 			log.u_bbr.delivered = 0;
2110 			lgb = tcp_log_event_(tp, nth, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
2111 			                     0, &log, false, NULL, NULL, 0, &tv);
2112 		}
2113 	}
2114 
2115 #ifdef INET6
2116 	if (isipv6) {
2117 		TCP_PROBE5(send, NULL, tp, ip6, tp, nth);
2118 		output_ret = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
2119 	}
2120 #endif /* INET6 */
2121 #if defined(INET) && defined(INET6)
2122 	else
2123 #endif
2124 #ifdef INET
2125 	{
2126 		TCP_PROBE5(send, NULL, tp, ip, tp, nth);
2127 		output_ret = ip_output(m, NULL, NULL, 0, NULL, inp);
2128 	}
2129 #endif
2130 	if (lgb != NULL)
2131 		lgb->tlb_errno = output_ret;
2132 	if (lock_upgraded)
2133 		INP_DOWNGRADE(inp);
2134 }
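
/*
 * Example: this is roughly how the keepalive timer uses the template
 * from tcpip_maketemplate() together with tcp_respond() to probe an
 * idle peer (a sketch of the tcp_timer.c flow; locking and error
 * handling elided):
 */
#if 0
	t_template = tcpip_maketemplate(inp);
	if (t_template != NULL) {
		tcp_respond(tp, t_template->tt_ipgen,
		    &t_template->tt_t, (struct mbuf *)NULL,
		    tp->rcv_nxt, tp->snd_una - 1, 0);
		free(t_template, M_TEMP);
	}
#endif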
2135 
2136 /*
2137  * Create a new TCP control block, making an
2138  * empty reassembly queue and hooking it to the argument
2139  * protocol control block.  The `inp' parameter must have
2140  * come from the zone allocator set up in tcp_init().
2141  */
2142 struct tcpcb *
2143 tcp_newtcpcb(struct inpcb *inp)
2144 {
2145 	struct tcpcb_mem *tm;
2146 	struct tcpcb *tp;
2147 #ifdef INET6
2148 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
2149 #endif /* INET6 */
2150 
2151 	tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
2152 	if (tm == NULL)
2153 		return (NULL);
2154 	tp = &tm->tcb;
2155 
2156 	/* Initialise cc_var struct for this tcpcb. */
2157 	tp->ccv = &tm->ccv;
2158 	tp->ccv->type = IPPROTO_TCP;
2159 	tp->ccv->ccvc.tcp = tp;
2160 	rw_rlock(&tcp_function_lock);
2161 	tp->t_fb = tcp_func_set_ptr;
2162 	refcount_acquire(&tp->t_fb->tfb_refcnt);
2163 	rw_runlock(&tcp_function_lock);
2164 	/*
2165 	 * Use the current system default CC algorithm.
2166 	 */
2167 	CC_LIST_RLOCK();
2168 	KASSERT(!STAILQ_EMPTY(&cc_list), ("cc_list is empty!"));
2169 	CC_ALGO(tp) = CC_DEFAULT_ALGO();
2170 	CC_LIST_RUNLOCK();
2171 
2172 	/*
2173 	 * The tcpcb will hold a reference on its inpcb until tcp_discardcb()
2174 	 * is called.
2175 	 */
2176 	in_pcbref(inp);	/* Reference for tcpcb */
2177 	tp->t_inpcb = inp;
2178 
2179 	if (CC_ALGO(tp)->cb_init != NULL)
2180 		if (CC_ALGO(tp)->cb_init(tp->ccv, NULL) > 0) {
2181 			if (tp->t_fb->tfb_tcp_fb_fini)
2182 				(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2183 			in_pcbrele_wlocked(inp);
2184 			refcount_release(&tp->t_fb->tfb_refcnt);
2185 			uma_zfree(V_tcpcb_zone, tm);
2186 			return (NULL);
2187 		}
2188 
2189 #ifdef TCP_HHOOK
2190 	tp->osd = &tm->osd;
2191 	if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
2192 		if (tp->t_fb->tfb_tcp_fb_fini)
2193 			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2194 		in_pcbrele_wlocked(inp);
2195 		refcount_release(&tp->t_fb->tfb_refcnt);
2196 		uma_zfree(V_tcpcb_zone, tm);
2197 		return (NULL);
2198 	}
2199 #endif
2200 
2201 #ifdef VIMAGE
2202 	tp->t_vnet = inp->inp_vnet;
2203 #endif
2204 	tp->t_timers = &tm->tt;
2205 	TAILQ_INIT(&tp->t_segq);
2206 	tp->t_maxseg =
2207 #ifdef INET6
2208 		isipv6 ? V_tcp_v6mssdflt :
2209 #endif /* INET6 */
2210 		V_tcp_mssdflt;
2211 
2212 	/* Set up our timeouts. */
2213 	callout_init(&tp->t_timers->tt_rexmt, 1);
2214 	callout_init(&tp->t_timers->tt_persist, 1);
2215 	callout_init(&tp->t_timers->tt_keep, 1);
2216 	callout_init(&tp->t_timers->tt_2msl, 1);
2217 	callout_init(&tp->t_timers->tt_delack, 1);
2218 
2219 	if (V_tcp_do_rfc1323)
2220 		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
2221 	if (V_tcp_do_sack)
2222 		tp->t_flags |= TF_SACK_PERMIT;
2223 	TAILQ_INIT(&tp->snd_holes);
2224 
2225 	/*
2226 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
2227 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
2228 	 * reasonable initial retransmit time.
2229 	 */
2230 	tp->t_srtt = TCPTV_SRTTBASE;
2231 	tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
2232 	tp->t_rttmin = tcp_rexmit_min;
2233 	tp->t_rxtcur = tcp_rexmit_initial;
2234 	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
2235 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
2236 	tp->t_rcvtime = ticks;
2237 	/*
2238 	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
2239 	 * because the socket may be bound to an IPv6 wildcard address,
2240 	 * which may match an IPv4-mapped IPv6 address.
2241 	 */
2242 	inp->inp_ip_ttl = V_ip_defttl;
2243 	inp->inp_ppcb = tp;
2244 #ifdef TCPPCAP
2245 	/*
2246 	 * Init the TCP PCAP queues.
2247 	 */
2248 	tcp_pcap_tcpcb_init(tp);
2249 #endif
2250 #ifdef TCP_BLACKBOX
2251 	/* Initialize the per-TCPCB log data. */
2252 	tcp_log_tcpcbinit(tp);
2253 #endif
2254 	tp->t_pacing_rate = -1;
2255 	if (tp->t_fb->tfb_tcp_fb_init) {
2256 		if ((*tp->t_fb->tfb_tcp_fb_init)(tp)) {
2257 			refcount_release(&tp->t_fb->tfb_refcnt);
2258 			in_pcbrele_wlocked(inp);
2259 			uma_zfree(V_tcpcb_zone, tm);
2260 			return (NULL);
2261 		}
2262 	}
2263 #ifdef STATS
2264 	if (V_tcp_perconn_stats_enable == 1)
2265 		tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0);
2266 #endif
2267 	if (V_tcp_do_lrd)
2268 		tp->t_flags |= TF_LRD;
2269 	return (tp);		/* XXX */
2270 }
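
/*
 * Example: callers must hold the inpcb write lock and handle
 * allocation failure; the socket attach path does roughly the
 * following (a sketch of the tcp_usrreq.c usage; pcb teardown
 * details and unlocking elided):
 */
#if 0
	tp = tcp_newtcpcb(inp);
	if (tp == NULL) {
		in_pcbdetach(inp);
		in_pcbfree(inp);
		return (ENOBUFS);
	}
	tp->t_state = TCPS_CLOSED;
#endif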
2271 
2272 /*
2273  * Switch the congestion control algorithm back to Vnet default for any active
2274  * control blocks using an algorithm which is about to go away. If the algorithm
2275  * has a cb_init function and it fails (no memory), then the operation fails
2276  * and the module unload will not succeed.
2278  */
2279 int
2280 tcp_ccalgounload(struct cc_algo *unload_algo)
2281 {
2282 	struct cc_algo *oldalgo, *newalgo;
2283 	struct inpcb *inp;
2284 	struct tcpcb *tp;
2285 	VNET_ITERATOR_DECL(vnet_iter);
2286 
2287 	/*
2288 	 * Check all active control blocks across all network stacks and change
2289 	 * any that are using "unload_algo" back to its default. If "unload_algo"
2290 	 * requires cleanup code to be run, call it.
2291 	 */
2292 	VNET_LIST_RLOCK();
2293 	VNET_FOREACH(vnet_iter) {
2294 		CURVNET_SET(vnet_iter);
2295 		INP_INFO_WLOCK(&V_tcbinfo);
2296 		/*
2297 		 * New connections already part way through being initialised
2298 		 * with the CC algo we're removing will not race with this code
2299 		 * because the INP_INFO_WLOCK is held during initialisation. We
2300 		 * therefore don't enter the loop below until the connection
2301 		 * list has stabilised.
2302 		 */
2303 		newalgo = CC_DEFAULT_ALGO();
2304 		CK_LIST_FOREACH(inp, &V_tcb, inp_list) {
2305 			INP_WLOCK(inp);
2306 			/* Important to skip tcptw structs. */
2307 			if (!(inp->inp_flags & INP_TIMEWAIT) &&
2308 			    (tp = intotcpcb(inp)) != NULL) {
2309 				/*
2310 				 * By holding INP_WLOCK here, we are assured
2311 				 * that the connection is not currently
2312 				 * executing inside the CC module's functions.
2313 				 * We attempt to switch to the Vnets default,
2314 				 * if the init fails then we fail the whole
2315 				 * operation and the module unload will fail.
2316 				 */
2317 				if (CC_ALGO(tp) == unload_algo) {
2318 					struct cc_var cc_mem;
2319 					int err;
2320 
2321 					oldalgo = CC_ALGO(tp);
2322 					memset(&cc_mem, 0, sizeof(cc_mem));
2323 					cc_mem.ccvc.tcp = tp;
2324 					if (newalgo->cb_init == NULL) {
2325 						/*
2326 						 * No init function, so we can
2327 						 * skip the dance around a
2328 						 * possible failure.
2328 						 */
2329 						CC_DATA(tp) = NULL;
2330 						goto proceed;
2331 					}
2332 					err = (newalgo->cb_init)(&cc_mem, NULL);
2333 					if (err) {
2334 						/*
2335 						 * Presumably no memory; the
2336 						 * caller will need to try again.
2337 						 */
2338 						INP_WUNLOCK(inp);
2339 						INP_INFO_WUNLOCK(&V_tcbinfo);
2340 						CURVNET_RESTORE();
2341 						VNET_LIST_RUNLOCK();
2342 						return (err);
2343 					}
2344 proceed:
2345 					if (oldalgo->cb_destroy != NULL)
2346 						oldalgo->cb_destroy(tp->ccv);
2347 					CC_ALGO(tp) = newalgo;
2348 					memcpy(tp->ccv, &cc_mem, sizeof(struct cc_var));
2349 					if (TCPS_HAVEESTABLISHED(tp->t_state) &&
2350 					    (CC_ALGO(tp)->conn_init != NULL)) {
2351 						/* Yep run the connection init for the new CC */
2352 						CC_ALGO(tp)->conn_init(tp->ccv);
2353 					}
2354 				}
2355 			}
2356 			INP_WUNLOCK(inp);
2357 		}
2358 		INP_INFO_WUNLOCK(&V_tcbinfo);
2359 		CURVNET_RESTORE();
2360 	}
2361 	VNET_LIST_RUNLOCK();
2362 	return (0);
2363 }
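
/*
 * Example: the CC framework is expected to call this while unloading a
 * module, e.g. for "kldunload cc_cubic", and to abort the unload on a
 * non-zero return (a sketch of the cc_deregister_algo() flow in
 * netinet/cc/cc.c; list manipulation and locking elided):
 */
#if 0
	err = tcp_ccalgounload(unload_algo);
	if (err != 0)
		return (err);	/* Connections still need it; unload fails. */
#endif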
2364 
2365 /*
2366  * Drop a TCP connection, reporting
2367  * the specified error.  If connection is synchronized,
2368  * then send a RST to peer.
2369  */
2370 struct tcpcb *
2371 tcp_drop(struct tcpcb *tp, int errno)
2372 {
2373 	struct socket *so = tp->t_inpcb->inp_socket;
2374 
2375 	NET_EPOCH_ASSERT();
2376 	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2377 	INP_WLOCK_ASSERT(tp->t_inpcb);
2378 
2379 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
2380 		tcp_state_change(tp, TCPS_CLOSED);
2381 		(void) tp->t_fb->tfb_tcp_output(tp);
2382 		TCPSTAT_INC(tcps_drops);
2383 	} else
2384 		TCPSTAT_INC(tcps_conndrops);
2385 	if (errno == ETIMEDOUT && tp->t_softerror)
2386 		errno = tp->t_softerror;
2387 	so->so_error = errno;
2388 	return (tcp_close(tp));
2389 }
2390 
2391 void
2392 tcp_discardcb(struct tcpcb *tp)
2393 {
2394 	struct inpcb *inp = tp->t_inpcb;
2395 
2396 	INP_WLOCK_ASSERT(inp);
2397 
2398 	/*
2399 	 * Make sure that all of our timers are stopped before we delete the
2400 	 * PCB.
2401 	 *
2402 	 * If stopping a timer fails, we schedule a discard function in same
2403 	 * callout, and the last discard function called will take care of
2404 	 * deleting the tcpcb.
2405 	 */
2406 	tp->t_timers->tt_draincnt = 0;
2407 	tcp_timer_stop(tp, TT_REXMT);
2408 	tcp_timer_stop(tp, TT_PERSIST);
2409 	tcp_timer_stop(tp, TT_KEEP);
2410 	tcp_timer_stop(tp, TT_2MSL);
2411 	tcp_timer_stop(tp, TT_DELACK);
2412 	if (tp->t_fb->tfb_tcp_timer_stop_all) {
2413 		/*
2414 		 * Call the stop-all function of the methods,
2415 		 * this function should call the tcp_timer_stop()
2416 		 * method with each of the function specific timeouts.
2417 		 * That stop will be called via the tfb_tcp_timer_stop()
2418 		 * which should use the async drain function of the
2419 		 * callout system (see tcp_var.h).
2420 		 */
2421 		tp->t_fb->tfb_tcp_timer_stop_all(tp);
2422 	}
2423 
2424 	/* free the reassembly queue, if any */
2425 	tcp_reass_flush(tp);
2426 
2427 #ifdef TCP_OFFLOAD
2428 	/* Disconnect offload device, if any. */
2429 	if (tp->t_flags & TF_TOE)
2430 		tcp_offload_detach(tp);
2431 #endif
2432 
2433 	tcp_free_sackholes(tp);
2434 
2435 #ifdef TCPPCAP
2436 	/* Free the TCP PCAP queues. */
2437 	tcp_pcap_drain(&(tp->t_inpkts));
2438 	tcp_pcap_drain(&(tp->t_outpkts));
2439 #endif
2440 
2441 	/* Allow the CC algorithm to clean up after itself. */
2442 	if (CC_ALGO(tp)->cb_destroy != NULL)
2443 		CC_ALGO(tp)->cb_destroy(tp->ccv);
2444 	CC_DATA(tp) = NULL;
2445 
2446 #ifdef TCP_HHOOK
2447 	khelp_destroy_osd(tp->osd);
2448 #endif
2449 #ifdef STATS
2450 	stats_blob_destroy(tp->t_stats);
2451 #endif
2452 
2453 	CC_ALGO(tp) = NULL;
2454 	inp->inp_ppcb = NULL;
2455 	if (tp->t_timers->tt_draincnt == 0) {
2456 		bool released __diagused;
2457 
2458 		released = tcp_freecb(tp);
2459 		KASSERT(!released, ("%s: inp %p should not have been released "
2460 		    "here", __func__, inp));
2461 	}
2462 }
2463 
2464 bool
2465 tcp_freecb(struct tcpcb *tp)
2466 {
2467 	struct inpcb *inp = tp->t_inpcb;
2468 	struct socket *so = inp->inp_socket;
2469 #ifdef INET6
2470 	bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
2471 #endif
2472 
2473 	INP_WLOCK_ASSERT(inp);
2474 	MPASS(tp->t_timers->tt_draincnt == 0);
2475 
2476 	/* We own the last reference on tcpcb, let's free it. */
2477 #ifdef TCP_BLACKBOX
2478 	tcp_log_tcpcbfini(tp);
2479 #endif
2480 	TCPSTATES_DEC(tp->t_state);
2481 	if (tp->t_fb->tfb_tcp_fb_fini)
2482 		(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2483 
2484 	/*
2485 	 * If we got enough samples through the srtt filter,
2486 	 * save the rtt and rttvar in the routing entry.
2487 	 * 'Enough' is arbitrarily defined as 4 rtt samples.
2488 	 * 4 samples is enough for the srtt filter to converge
2489 	 * to within a few percent of the correct value; fewer samples
2490 	 * and we could save a bogus rtt. The danger is not high
2491 	 * as tcp quickly recovers from everything.
2492 	 * XXX: Works very well but needs some more statistics!
2493 	 *
2494 	 * XXXRRS: Updating must be after the stack fini() since
2495 	 * that may be converting some internal representation of
2496 	 * say srtt etc into the general one used by other stacks.
2497 	 * Let's also at least protect against the so being NULL
2498 	 * as RW stated below.
2499 	 */
2500 	if ((tp->t_rttupdated >= 4) && (so != NULL)) {
2501 		struct hc_metrics_lite metrics;
2502 		uint32_t ssthresh;
2503 
2504 		bzero(&metrics, sizeof(metrics));
2505 		/*
2506 		 * Update the ssthresh always when the conditions below
2507 		 * are satisfied. This gives us better new start value
2508 		 * for the congestion avoidance for new connections.
2509 		 * ssthresh is only set if packet loss occurred on a session.
2510 		 *
2511 		 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
2512 		 * being torn down.  Ideally this code would not use 'so'.
2513 		 */
2514 		ssthresh = tp->snd_ssthresh;
2515 		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
2516 			/*
2517 			 * Convert the limit from user data bytes to
2518 			 * packets, then to packet data bytes.
2519 			 */
2520 			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
2521 			if (ssthresh < 2)
2522 				ssthresh = 2;
2523 			ssthresh *= (tp->t_maxseg +
2524 #ifdef INET6
2525 			    (isipv6 ? sizeof (struct ip6_hdr) +
2526 			    sizeof (struct tcphdr) :
2527 #endif
2528 			    sizeof (struct tcpiphdr)
2529 #ifdef INET6
2530 			    )
2531 #endif
2532 			    );
2533 		} else
2534 			ssthresh = 0;
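		/*
		 * Worked example (illustrative numbers): with a final
		 * ssthresh of 32768 bytes and t_maxseg = 1448, the
		 * conversion above yields (32768 + 724) / 1448 = 23
		 * segments, scaled back to 23 * (1448 + 40) = 34224 wire
		 * bytes for IPv4 (sizeof(struct tcpiphdr) == 40).
		 */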
2535 		metrics.rmx_ssthresh = ssthresh;
2536 
2537 		metrics.rmx_rtt = tp->t_srtt;
2538 		metrics.rmx_rttvar = tp->t_rttvar;
2539 		metrics.rmx_cwnd = tp->snd_cwnd;
2540 		metrics.rmx_sendpipe = 0;
2541 		metrics.rmx_recvpipe = 0;
2542 
2543 		tcp_hc_update(&inp->inp_inc, &metrics);
2544 	}
2545 
2546 	refcount_release(&tp->t_fb->tfb_refcnt);
2547 	uma_zfree(V_tcpcb_zone, tp);
2548 
2549 	return (in_pcbrele_wlocked(inp));
2550 }
2551 
2552 /*
2553  * Attempt to close a TCP control block, marking it as dropped, and freeing
2554  * the socket if we hold the only reference.
2555  */
2556 struct tcpcb *
2557 tcp_close(struct tcpcb *tp)
2558 {
2559 	struct inpcb *inp = tp->t_inpcb;
2560 	struct socket *so;
2561 
2562 	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2563 	INP_WLOCK_ASSERT(inp);
2564 
2565 #ifdef TCP_OFFLOAD
2566 	if (tp->t_state == TCPS_LISTEN)
2567 		tcp_offload_listen_stop(tp);
2568 #endif
2569 	/*
2570 	 * This releases the TFO pending counter resource for TFO listen
2571 	 * sockets as well as passively-created TFO sockets that transition
2572 	 * from SYN_RECEIVED to CLOSED.
2573 	 */
2574 	if (tp->t_tfo_pending) {
2575 		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2576 		tp->t_tfo_pending = NULL;
2577 	}
2578 	in_pcbdrop(inp);
2579 	TCPSTAT_INC(tcps_closed);
2580 	if (tp->t_state != TCPS_CLOSED)
2581 		tcp_state_change(tp, TCPS_CLOSED);
2582 	KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
2583 	so = inp->inp_socket;
2584 	soisdisconnected(so);
2585 	if (inp->inp_flags & INP_SOCKREF) {
2586 		KASSERT(so->so_state & SS_PROTOREF,
2587 		    ("tcp_close: !SS_PROTOREF"));
2588 		inp->inp_flags &= ~INP_SOCKREF;
2589 		INP_WUNLOCK(inp);
2590 		SOCK_LOCK(so);
2591 		so->so_state &= ~SS_PROTOREF;
2592 		sofree(so);
2593 		return (NULL);
2594 	}
2595 	return (tp);
2596 }
2597 
2598 void
2599 tcp_drain(void)
2600 {
2601 	VNET_ITERATOR_DECL(vnet_iter);
2602 
2603 	if (!do_tcpdrain)
2604 		return;
2605 
2606 	VNET_LIST_RLOCK_NOSLEEP();
2607 	VNET_FOREACH(vnet_iter) {
2608 		CURVNET_SET(vnet_iter);
2609 		struct inpcb *inpb;
2610 		struct tcpcb *tcpb;
2611 
2612 		/*
2613 		 * Walk the tcpcbs, if any exist, and flush the reassembly
2614 		 * queue of each, if there is one...
2615 		 * XXX: The "Net/3" implementation doesn't imply that the TCP
2616 		 *      reassembly queue should be flushed, but in a situation
2617 		 *      where we're really low on mbufs, this is potentially
2618 		 *      useful.
2619 		 */
2620 		INP_INFO_WLOCK(&V_tcbinfo);
2621 		CK_LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
2622 			INP_WLOCK(inpb);
2623 			if (inpb->inp_flags & INP_TIMEWAIT) {
2624 				INP_WUNLOCK(inpb);
2625 				continue;
2626 			}
2627 			if ((tcpb = intotcpcb(inpb)) != NULL) {
2628 				tcp_reass_flush(tcpb);
2629 				tcp_clean_sackreport(tcpb);
2630 #ifdef TCP_BLACKBOX
2631 				tcp_log_drain(tcpb);
2632 #endif
2633 #ifdef TCPPCAP
2634 				if (tcp_pcap_aggressive_free) {
2635 					/* Free the TCP PCAP queues. */
2636 					tcp_pcap_drain(&(tcpb->t_inpkts));
2637 					tcp_pcap_drain(&(tcpb->t_outpkts));
2638 				}
2639 #endif
2640 			}
2641 			INP_WUNLOCK(inpb);
2642 		}
2643 		INP_INFO_WUNLOCK(&V_tcbinfo);
2644 		CURVNET_RESTORE();
2645 	}
2646 	VNET_LIST_RUNLOCK_NOSLEEP();
2647 }
2648 
2649 /*
2650  * Notify a tcp user of an asynchronous error;
2651  * store the error as a soft error.
2653  *
2654  * Do not wake up user since there currently is no mechanism for
2655  * reporting soft errors (yet - a kqueue filter may be added).
2656  */
2657 static struct inpcb *
2658 tcp_notify(struct inpcb *inp, int error)
2659 {
2660 	struct tcpcb *tp;
2661 
2662 	INP_INFO_LOCK_ASSERT(&V_tcbinfo);
2663 	INP_WLOCK_ASSERT(inp);
2664 
2665 	if ((inp->inp_flags & INP_TIMEWAIT) ||
2666 	    (inp->inp_flags & INP_DROPPED))
2667 		return (inp);
2668 
2669 	tp = intotcpcb(inp);
2670 	KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
2671 
2672 	/*
2673 	 * Ignore some errors if we are hooked up.
2674 	 * If connection hasn't completed, has retransmitted several times,
2675 	 * and receives a second error, give up now.  This is better
2676 	 * than waiting a long time to establish a connection that
2677 	 * can never complete.
2678 	 */
2679 	if (tp->t_state == TCPS_ESTABLISHED &&
2680 	    (error == EHOSTUNREACH || error == ENETUNREACH ||
2681 	     error == EHOSTDOWN)) {
2682 		if (inp->inp_route.ro_nh) {
2683 			NH_FREE(inp->inp_route.ro_nh);
2684 			inp->inp_route.ro_nh = (struct nhop_object *)NULL;
2685 		}
2686 		return (inp);
2687 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
2688 	    tp->t_softerror) {
2689 		tp = tcp_drop(tp, error);
2690 		if (tp != NULL)
2691 			return (inp);
2692 		else
2693 			return (NULL);
2694 	} else {
2695 		tp->t_softerror = error;
2696 		return (inp);
2697 	}
2698 #if 0
2699 	wakeup( &so->so_timeo);
2700 	sorwakeup(so);
2701 	sowwakeup(so);
2702 #endif
2703 }
2704 
2705 static int
2706 tcp_pcblist(SYSCTL_HANDLER_ARGS)
2707 {
2708 	struct epoch_tracker et;
2709 	struct inpcb *inp;
2710 	struct xinpgen xig;
2711 	int error;
2712 
2713 	if (req->newptr != NULL)
2714 		return (EPERM);
2715 
2716 	if (req->oldptr == NULL) {
2717 		int n;
2718 
2719 		n = V_tcbinfo.ipi_count +
2720 		    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2721 		n += imax(n / 8, 10);
2722 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
2723 		return (0);
2724 	}
2725 
2726 	if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
2727 		return (error);
2728 
2729 	bzero(&xig, sizeof(xig));
2730 	xig.xig_len = sizeof xig;
2731 	xig.xig_count = V_tcbinfo.ipi_count +
2732 	    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2733 	xig.xig_gen = V_tcbinfo.ipi_gencnt;
2734 	xig.xig_sogen = so_gencnt;
2735 	error = SYSCTL_OUT(req, &xig, sizeof xig);
2736 	if (error)
2737 		return (error);
2738 
2739 	error = syncache_pcblist(req);
2740 	if (error)
2741 		return (error);
2742 
2743 	NET_EPOCH_ENTER(et);
2744 	for (inp = CK_LIST_FIRST(V_tcbinfo.ipi_listhead);
2745 	    inp != NULL;
2746 	    inp = CK_LIST_NEXT(inp, inp_list)) {
2747 		INP_RLOCK(inp);
2748 		if (inp->inp_gencnt <= xig.xig_gen) {
2749 			int crerr;
2750 
2751 			/*
2752 			 * XXX: This use of cr_cansee(), introduced with
2753 			 * TCP state changes, is not quite right, but for
2754 			 * now, better than nothing.
2755 			 */
2756 			if (inp->inp_flags & INP_TIMEWAIT) {
2757 				if (intotw(inp) != NULL)
2758 					crerr = cr_cansee(req->td->td_ucred,
2759 					    intotw(inp)->tw_cred);
2760 				else
2761 					crerr = EINVAL;	/* Skip this inp. */
2762 			} else
2763 				crerr = cr_canseeinpcb(req->td->td_ucred, inp);
2764 			if (crerr == 0) {
2765 				struct xtcpcb xt;
2766 
2767 				tcp_inptoxtp(inp, &xt);
2768 				INP_RUNLOCK(inp);
2769 				error = SYSCTL_OUT(req, &xt, sizeof xt);
2770 				if (error)
2771 					break;
2772 				else
2773 					continue;
2774 			}
2775 		}
2776 		INP_RUNLOCK(inp);
2777 	}
2778 	NET_EPOCH_EXIT(et);
2779 
2780 	if (!error) {
2781 		/*
2782 		 * Give the user an updated idea of our state.
2783 		 * If the generation differs from what we told
2784 		 * her before, she knows that something happened
2785 		 * while we were processing this request, and it
2786 		 * might be necessary to retry.
2787 		 */
2788 		xig.xig_gen = V_tcbinfo.ipi_gencnt;
2789 		xig.xig_sogen = so_gencnt;
2790 		xig.xig_count = V_tcbinfo.ipi_count +
2791 		    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2792 		error = SYSCTL_OUT(req, &xig, sizeof xig);
2793 	}
2794 
2795 	return (error);
2796 }
2797 
2798 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2799     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2800     NULL, 0, tcp_pcblist, "S,xtcpcb",
2801     "List of active TCP connections");
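
/*
 * Example: userland readers such as netstat(1) consume this sysctl
 * roughly as follows (a userland sketch, not kernel code; error
 * handling abbreviated):
 */
#if 0
	size_t len = 0;
	char *buf;

	if (sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0) < 0)
		err(1, "sysctlbyname");
	buf = malloc(len);
	if (sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0) < 0)
		err(1, "sysctlbyname");
	/*
	 * buf now holds a leading struct xinpgen, a run of struct xtcpcb
	 * entries, and a trailing struct xinpgen whose xig_gen can be
	 * compared against the first to detect concurrent changes.
	 */
#endif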
2802 
2803 #ifdef INET
2804 static int
2805 tcp_getcred(SYSCTL_HANDLER_ARGS)
2806 {
2807 	struct xucred xuc;
2808 	struct sockaddr_in addrs[2];
2809 	struct epoch_tracker et;
2810 	struct inpcb *inp;
2811 	int error;
2812 
2813 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
2814 	if (error)
2815 		return (error);
2816 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
2817 	if (error)
2818 		return (error);
2819 	NET_EPOCH_ENTER(et);
2820 	inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
2821 	    addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
2822 	NET_EPOCH_EXIT(et);
2823 	if (inp != NULL) {
2824 		if (inp->inp_socket == NULL)
2825 			error = ENOENT;
2826 		if (error == 0)
2827 			error = cr_canseeinpcb(req->td->td_ucred, inp);
2828 		if (error == 0)
2829 			cru2x(inp->inp_cred, &xuc);
2830 		INP_RUNLOCK(inp);
2831 	} else
2832 		error = ENOENT;
2833 	if (error == 0)
2834 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2835 	return (error);
2836 }
2837 
2838 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
2839     CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2840     0, 0, tcp_getcred, "S,xucred",
2841     "Get the xucred of a TCP connection");
2842 #endif /* INET */
2843 
2844 #ifdef INET6
2845 static int
2846 tcp6_getcred(SYSCTL_HANDLER_ARGS)
2847 {
2848 	struct epoch_tracker et;
2849 	struct xucred xuc;
2850 	struct sockaddr_in6 addrs[2];
2851 	struct inpcb *inp;
2852 	int error;
2853 #ifdef INET
2854 	int mapped = 0;
2855 #endif
2856 
2857 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
2858 	if (error)
2859 		return (error);
2860 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
2861 	if (error)
2862 		return (error);
2863 	if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
2864 	    (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
2865 		return (error);
2866 	}
2867 	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
2868 #ifdef INET
2869 		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
2870 			mapped = 1;
2871 		else
2872 #endif
2873 			return (EINVAL);
2874 	}
2875 
2876 	NET_EPOCH_ENTER(et);
2877 #ifdef INET
2878 	if (mapped == 1)
2879 		inp = in_pcblookup(&V_tcbinfo,
2880 			*(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
2881 			addrs[1].sin6_port,
2882 			*(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
2883 			addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
2884 	else
2885 #endif
2886 		inp = in6_pcblookup(&V_tcbinfo,
2887 			&addrs[1].sin6_addr, addrs[1].sin6_port,
2888 			&addrs[0].sin6_addr, addrs[0].sin6_port,
2889 			INPLOOKUP_RLOCKPCB, NULL);
2890 	NET_EPOCH_EXIT(et);
2891 	if (inp != NULL) {
2892 		if (inp->inp_socket == NULL)
2893 			error = ENOENT;
2894 		if (error == 0)
2895 			error = cr_canseeinpcb(req->td->td_ucred, inp);
2896 		if (error == 0)
2897 			cru2x(inp->inp_cred, &xuc);
2898 		INP_RUNLOCK(inp);
2899 	} else
2900 		error = ENOENT;
2901 	if (error == 0)
2902 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2903 	return (error);
2904 }
2905 
2906 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
2907     CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2908     0, 0, tcp6_getcred, "S,xucred",
2909     "Get the xucred of a TCP6 connection");
2910 #endif /* INET6 */
2911 
2912 #ifdef INET
2913 /* Path MTU to try next when a fragmentation-needed message is received. */
2914 static inline int
2915 tcp_next_pmtu(const struct icmp *icp, const struct ip *ip)
2916 {
2917 	int mtu = ntohs(icp->icmp_nextmtu);
2918 
2919 	/* If no alternative MTU was proposed, try the next smaller one. */
2920 	if (!mtu)
2921 		mtu = ip_next_mtu(ntohs(ip->ip_len), 1);
2922 	if (mtu < V_tcp_minmss + sizeof(struct tcpiphdr))
2923 		mtu = V_tcp_minmss + sizeof(struct tcpiphdr);
2924 
2925 	return (mtu);
2926 }
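
/*
 * For example, with the default net.inet.tcp.minmss of 216 this never
 * returns less than 216 + sizeof(struct tcpiphdr) = 256 bytes, no
 * matter what MTU the ICMP message proposed.
 */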
2927 
2928 static void
2929 tcp_ctlinput_with_port(int cmd, struct sockaddr *sa, void *vip, uint16_t port)
2930 {
2931 	struct ip *ip = vip;
2932 	struct tcphdr *th;
2933 	struct in_addr faddr;
2934 	struct inpcb *inp;
2935 	struct tcpcb *tp;
2936 	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
2937 	struct icmp *icp;
2938 	struct in_conninfo inc;
2939 	tcp_seq icmp_tcp_seq;
2940 	int mtu;
2941 
2942 	faddr = ((struct sockaddr_in *)sa)->sin_addr;
2943 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
2944 		return;
2945 
2946 	if (cmd == PRC_MSGSIZE)
2947 		notify = tcp_mtudisc_notify;
2948 	else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2949 		cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2950 		cmd == PRC_TIMXCEED_INTRANS) && ip)
2951 		notify = tcp_drop_syn_sent;
2952 
2953 	/*
2954 	 * Hostdead is ugly because it goes linearly through all PCBs.
2955 	 * XXX: We never get this from ICMP, otherwise it makes an
2956 	 * excellent DoS attack on machines with many connections.
2957 	 */
2958 	else if (cmd == PRC_HOSTDEAD)
2959 		ip = NULL;
2960 	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
2961 		return;
2962 
2963 	if (ip == NULL) {
2964 		in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
2965 		return;
2966 	}
2967 
2968 	icp = (struct icmp *)((caddr_t)ip - offsetof(struct icmp, icmp_ip));
2969 	th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2970 	inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport, ip->ip_src,
2971 	    th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
2972 	if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
2973 		/* signal EHOSTDOWN, as it flushes the cached route */
2974 		inp = (*notify)(inp, EHOSTDOWN);
2975 		goto out;
2976 	}
2977 	icmp_tcp_seq = th->th_seq;
2978 	if (inp != NULL)  {
2979 		if (!(inp->inp_flags & INP_TIMEWAIT) &&
2980 		    !(inp->inp_flags & INP_DROPPED) &&
2981 		    !(inp->inp_socket == NULL)) {
2982 			tp = intotcpcb(inp);
2983 #ifdef TCP_OFFLOAD
2984 			if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) {
2985 				/*
2986 				 * MTU discovery for offloaded connections.  Let
2987 				 * the TOE driver verify seq# and process it.
2988 				 */
2989 				mtu = tcp_next_pmtu(icp, ip);
2990 				tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
2991 				goto out;
2992 			}
2993 #endif
2994 			if (tp->t_port != port) {
2995 				goto out;
2996 			}
2997 			if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
2998 			    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
2999 				if (cmd == PRC_MSGSIZE) {
3000 					/*
3001 					 * MTU discovery: we got a needfrag and
3002 					 * will potentially try a lower MTU.
3003 					 */
3004 					mtu = tcp_next_pmtu(icp, ip);
3005 
3006 					/*
3007 					 * Only process the offered MTU if it
3008 					 * is smaller than the current one.
3009 					 */
3010 					if (mtu < tp->t_maxseg +
3011 					    sizeof(struct tcpiphdr)) {
3012 						bzero(&inc, sizeof(inc));
3013 						inc.inc_faddr = faddr;
3014 						inc.inc_fibnum =
3015 						    inp->inp_inc.inc_fibnum;
3016 						tcp_hc_updatemtu(&inc, mtu);
3017 						tcp_mtudisc(inp, mtu);
3018 					}
3019 				} else
3020 					inp = (*notify)(inp,
3021 					    inetctlerrmap[cmd]);
3022 			}
3023 		}
3024 	} else {
3025 		bzero(&inc, sizeof(inc));
3026 		inc.inc_fport = th->th_dport;
3027 		inc.inc_lport = th->th_sport;
3028 		inc.inc_faddr = faddr;
3029 		inc.inc_laddr = ip->ip_src;
3030 		syncache_unreach(&inc, icmp_tcp_seq, port);
3031 	}
3032 out:
3033 	if (inp != NULL)
3034 		INP_WUNLOCK(inp);
3035 }
3036 
3037 void
3038 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
3039 {
3040 	tcp_ctlinput_with_port(cmd, sa, vip, htons(0));
3041 }
3042 
3043 void
3044 tcp_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *vip, void *unused)
3045 {
3046 	/* It's a tunneled TCP-over-UDP ICMP error. */
3047 	struct ip *outer_ip, *inner_ip;
3048 	struct icmp *icmp;
3049 	struct udphdr *udp;
3050 	struct tcphdr *th, ttemp;
3051 	int i_hlen, o_len;
3052 	uint16_t port;
3053 
3054 	inner_ip = (struct ip *)vip;
3055 	icmp = (struct icmp *)((caddr_t)inner_ip -
3056 	    (sizeof(struct icmp) - sizeof(struct ip)));
3057 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
3058 	i_hlen = inner_ip->ip_hl << 2;
3059 	o_len = ntohs(outer_ip->ip_len);
3060 	if (o_len <
3061 	    (sizeof(struct ip) + 8 + i_hlen + sizeof(struct udphdr) + offsetof(struct tcphdr, th_ack))) {
3062 		/* Not enough data present */
3063 		return;
3064 	}
3065 	/* OK, strip out the inner UDP header by copying the TCP header on top of it. */
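	/*
	 * ICMP payload layout before and after the copy (illustrative):
	 *
	 *   before: [inner IP hdr][UDP hdr][TCP hdr ...]
	 *   after:  [inner IP hdr][TCP hdr ...]
	 *
	 * The result looks like a plain ICMP error for a TCP segment, so
	 * tcp_ctlinput_with_port() can handle it unchanged.
	 */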
3066 	udp = (struct udphdr *)(((caddr_t)inner_ip) + i_hlen);
3067 	if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
3068 		return;
3069 	}
3070 	port = udp->uh_dport;
3071 	th = (struct tcphdr *)(udp + 1);
3072 	memcpy(&ttemp, th, sizeof(struct tcphdr));
3073 	memcpy(udp, &ttemp, sizeof(struct tcphdr));
3074 	/* Now adjust down the size of the outer IP header */
3075 	o_len -= sizeof(struct udphdr);
3076 	outer_ip->ip_len = htons(o_len);
3077 	/* Now call in to the normal handling code */
3078 	tcp_ctlinput_with_port(cmd, sa, vip, port);
3079 }
3080 #endif /* INET */
3081 
3082 #ifdef INET6
3083 static inline int
3084 tcp6_next_pmtu(const struct icmp6_hdr *icmp6)
3085 {
3086 	int mtu = ntohl(icmp6->icmp6_mtu);
3087 
3088 	/*
3089 	 * If no alternative MTU was proposed, or the proposed MTU was too
3090 	 * small, set to the min.
3091 	 */
3092 	if (mtu < IPV6_MMTU)
3093 		mtu = IPV6_MMTU - 8;	/* XXXNP: what is the adjustment for? */
3094 	return (mtu);
3095 }
3096 
3097 static void
3098 tcp6_ctlinput_with_port(int cmd, struct sockaddr *sa, void *d, uint16_t port)
3099 {
3100 	struct in6_addr *dst;
3101 	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
3102 	struct ip6_hdr *ip6;
3103 	struct mbuf *m;
3104 	struct inpcb *inp;
3105 	struct tcpcb *tp;
3106 	struct icmp6_hdr *icmp6;
3107 	struct ip6ctlparam *ip6cp = NULL;
3108 	const struct sockaddr_in6 *sa6_src = NULL;
3109 	struct in_conninfo inc;
3110 	struct tcp_ports {
3111 		uint16_t th_sport;
3112 		uint16_t th_dport;
3113 	} t_ports;
3114 	tcp_seq icmp_tcp_seq;
3115 	unsigned int mtu;
3116 	unsigned int off;
3117 
3118 	if (sa->sa_family != AF_INET6 ||
3119 	    sa->sa_len != sizeof(struct sockaddr_in6))
3120 		return;
3121 
3122 	/* if the parameter is from icmp6, decode it. */
3123 	if (d != NULL) {
3124 		ip6cp = (struct ip6ctlparam *)d;
3125 		icmp6 = ip6cp->ip6c_icmp6;
3126 		m = ip6cp->ip6c_m;
3127 		ip6 = ip6cp->ip6c_ip6;
3128 		off = ip6cp->ip6c_off;
3129 		sa6_src = ip6cp->ip6c_src;
3130 		dst = ip6cp->ip6c_finaldst;
3131 	} else {
3132 		m = NULL;
3133 		ip6 = NULL;
3134 		off = 0;	/* fool gcc */
3135 		sa6_src = &sa6_any;
3136 		dst = NULL;
3137 	}
3138 
3139 	if (cmd == PRC_MSGSIZE)
3140 		notify = tcp_mtudisc_notify;
3141 	else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
3142 		cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
3143 		cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL)
3144 		notify = tcp_drop_syn_sent;
3145 
3146 	/*
3147 	 * Hostdead is ugly because it goes linearly through all PCBs.
3148 	 * XXX: We never get this from ICMP, otherwise it makes an
3149 	 * excellent DoS attack on machines with many connections.
3150 	 */
3151 	else if (cmd == PRC_HOSTDEAD)
3152 		ip6 = NULL;
3153 	else if ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)
3154 		return;
3155 
3156 	if (ip6 == NULL) {
3157 		in6_pcbnotify(&V_tcbinfo, sa, 0,
3158 			      (const struct sockaddr *)sa6_src,
3159 			      0, cmd, NULL, notify);
3160 		return;
3161 	}
3162 
3163 	/* Check if we can safely get the ports from the tcp hdr */
3164 	if (m == NULL ||
3165 	    (m->m_pkthdr.len <
3166 		(int32_t) (off + sizeof(struct tcp_ports)))) {
3167 		return;
3168 	}
3169 	bzero(&t_ports, sizeof(struct tcp_ports));
3170 	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
3171 	inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
3172 	    &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL);
3173 	if (inp != NULL && PRC_IS_REDIRECT(cmd)) {
3174 		/* signal EHOSTDOWN, as it flushes the cached route */
3175 		inp = (*notify)(inp, EHOSTDOWN);
3176 		goto out;
3177 	}
3178 	off += sizeof(struct tcp_ports);
3179 	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
3180 		goto out;
3181 	}
3182 	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
3183 	if (inp != NULL)  {
3184 		if (!(inp->inp_flags & INP_TIMEWAIT) &&
3185 		    !(inp->inp_flags & INP_DROPPED) &&
3186 		    !(inp->inp_socket == NULL)) {
3187 			tp = intotcpcb(inp);
3188 #ifdef TCP_OFFLOAD
3189 			if (tp->t_flags & TF_TOE && cmd == PRC_MSGSIZE) {
3190 				/* MTU discovery for offloaded connections. */
3191 				mtu = tcp6_next_pmtu(icmp6);
3192 				tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
3193 				goto out;
3194 			}
3195 #endif
3196 			if (tp->t_port != port) {
3197 				goto out;
3198 			}
3199 			if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
3200 			    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
3201 				if (cmd == PRC_MSGSIZE) {
3202 					/*
3203 					 * MTU discovery:
3204 					 * If we got a needfrag set the MTU
3205 					 * in the route to the suggested new
3206 					 * value (if given) and then notify.
3207 					 */
3208 					mtu = tcp6_next_pmtu(icmp6);
3209 
3210 					bzero(&inc, sizeof(inc));
3211 					inc.inc_fibnum = M_GETFIB(m);
3212 					inc.inc_flags |= INC_ISIPV6;
3213 					inc.inc6_faddr = *dst;
3214 					if (in6_setscope(&inc.inc6_faddr,
3215 						m->m_pkthdr.rcvif, NULL))
3216 						goto out;
3217 					/*
3218 					 * Only process the offered MTU if it
3219 					 * is smaller than the current one.
3220 					 */
3221 					if (mtu < tp->t_maxseg +
3222 					    sizeof (struct tcphdr) +
3223 					    sizeof (struct ip6_hdr)) {
3224 						tcp_hc_updatemtu(&inc, mtu);
3225 						tcp_mtudisc(inp, mtu);
3226 						ICMP6STAT_INC(icp6s_pmtuchg);
3227 					}
3228 				} else
3229 					inp = (*notify)(inp,
3230 					    inet6ctlerrmap[cmd]);
3231 			}
3232 		}
3233 	} else {
3234 		bzero(&inc, sizeof(inc));
3235 		inc.inc_fibnum = M_GETFIB(m);
3236 		inc.inc_flags |= INC_ISIPV6;
3237 		inc.inc_fport = t_ports.th_dport;
3238 		inc.inc_lport = t_ports.th_sport;
3239 		inc.inc6_faddr = *dst;
3240 		inc.inc6_laddr = ip6->ip6_src;
3241 		syncache_unreach(&inc, icmp_tcp_seq, port);
3242 	}
3243 out:
3244 	if (inp != NULL)
3245 		INP_WUNLOCK(inp);
3246 }
3247 
3248 void
3249 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
3250 {
3251 	tcp6_ctlinput_with_port(cmd, sa, d, htons(0));
3252 }
3253 
3254 void
3255 tcp6_ctlinput_viaudp(int cmd, struct sockaddr *sa, void *d, void *unused)
3256 {
3257 	struct ip6ctlparam *ip6cp;
3258 	struct mbuf *m;
3259 	struct udphdr *udp;
3260 	uint16_t port;
3261 
3262 	ip6cp = (struct ip6ctlparam *)d;
3263 	m = m_pulldown(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(struct udphdr), NULL);
3264 	if (m == NULL) {
3265 		return;
3266 	}
3267 	udp = mtod(m, struct udphdr *);
3268 	if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
3269 		return;
3270 	}
3271 	port = udp->uh_dport;
3272 	m_adj(m, sizeof(struct udphdr));
3273 	if ((m->m_flags & M_PKTHDR) == 0) {
3274 		ip6cp->ip6c_m->m_pkthdr.len -= sizeof(struct udphdr);
3275 	}
3276 	/* Now call in to the normal handling code */
3277 	tcp6_ctlinput_with_port(cmd, sa, d, port);
3278 }
3279 
3280 #endif /* INET6 */
3281 
3282 static uint32_t
3283 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len)
3284 {
3285 	SIPHASH_CTX ctx;
3286 	uint32_t hash[2];
3287 
3288 	KASSERT(len >= SIPHASH_KEY_LENGTH,
3289 	    ("%s: keylen %u too short ", __func__, len));
3290 	SipHash24_Init(&ctx);
3291 	SipHash_SetKey(&ctx, (uint8_t *)key);
3292 	SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t));
3293 	SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t));
3294 	switch (inc->inc_flags & INC_ISIPV6) {
3295 #ifdef INET
3296 	case 0:
3297 		SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr));
3298 		SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr));
3299 		break;
3300 #endif
3301 #ifdef INET6
3302 	case INC_ISIPV6:
3303 		SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr));
3304 		SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr));
3305 		break;
3306 #endif
3307 	}
3308 	SipHash_Final((uint8_t *)hash, &ctx);
3309 
3310 	return (hash[0] ^ hash[1]);
3311 }
3312 
3313 uint32_t
3314 tcp_new_ts_offset(struct in_conninfo *inc)
3315 {
3316 	struct in_conninfo inc_store, *local_inc;
3317 
3318 	if (!V_tcp_ts_offset_per_conn) {
3319 		memcpy(&inc_store, inc, sizeof(struct in_conninfo));
3320 		inc_store.inc_lport = 0;
3321 		inc_store.inc_fport = 0;
3322 		local_inc = &inc_store;
3323 	} else {
3324 		local_inc = inc;
3325 	}
3326 	return (tcp_keyed_hash(local_inc, V_ts_offset_secret,
3327 	    sizeof(V_ts_offset_secret)));
3328 }
3329 
3330 /*
3331  * Following is where TCP initial sequence number generation occurs.
3332  *
3333  * There are two places where we must use initial sequence numbers:
3334  * 1.  In SYN-ACK packets.
3335  * 2.  In SYN packets.
3336  *
3337  * All ISNs for SYN-ACK packets are generated by the syncache.  See
3338  * tcp_syncache.c for details.
3339  *
3340  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
3341  * depends on this property.  In addition, these ISNs should be
3342  * unguessable so as to prevent connection hijacking.  To satisfy
3343  * the requirements of this situation, the algorithm outlined in
3344  * RFC 1948 is used, with only small modifications.
3345  *
3346  * Implementation details:
3347  *
3348  * Time is based off the system timer, and is corrected so that it
3349  * increases by one megabyte per second.  This allows for proper
3350  * recycling on high speed LANs while still leaving over an hour
3351  * before rollover.
3352  *
3353  * As reading the *exact* system time is too expensive to be done
3354  * whenever setting up a TCP connection, we increment the time
3355  * offset in two ways.  First, a small random positive increment
3356  * is added to isn_offset for each connection that is set up.
3357  * Second, the function tcp_isn_tick fires once per clock tick
3358  * and increments isn_offset as necessary so that sequence numbers
3359  * are incremented at approximately ISN_BYTES_PER_SECOND.  The
3360  * random positive increments serve only to ensure that the same
3361  * exact sequence number is never sent out twice (as could otherwise
3362  * happen when a port is recycled in less than the system tick
3363  * interval.)
3364  *
3365  * net.inet.tcp.isn_reseed_interval controls the number of seconds
3366  * between seeding of isn_secret.  This is normally set to zero,
3367  * as reseeding should not be necessary.
3368  *
3369  * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
3370  * isn_offset_old, and isn_ctx is performed using the ISN lock.  In
3371  * general, this means holding an exclusive (write) lock.
3372  */
3373 
3374 #define ISN_BYTES_PER_SECOND 1048576
3375 #define ISN_STATIC_INCREMENT 4096
3376 #define ISN_RANDOM_INCREMENT (4096 - 1)
3377 #define ISN_SECRET_LENGTH    SIPHASH_KEY_LENGTH
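
/*
 * Worked example: with hz = 1000, tcp_new_isn() advances the projected
 * offset by ISN_BYTES_PER_SECOND / hz = 1048 per tick, and every new
 * connection adds ISN_STATIC_INCREMENT (4096) plus up to
 * ISN_RANDOM_INCREMENT (4095) on top.  The 32-bit sequence space thus
 * wraps after 2^32 / ISN_BYTES_PER_SECOND = 4096 seconds, i.e. the
 * "over an hour before rollover" mentioned above.
 */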
3378 
3379 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]);
3380 VNET_DEFINE_STATIC(int, isn_last);
3381 VNET_DEFINE_STATIC(int, isn_last_reseed);
3382 VNET_DEFINE_STATIC(u_int32_t, isn_offset);
3383 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old);
3384 
3385 #define	V_isn_secret			VNET(isn_secret)
3386 #define	V_isn_last			VNET(isn_last)
3387 #define	V_isn_last_reseed		VNET(isn_last_reseed)
3388 #define	V_isn_offset			VNET(isn_offset)
3389 #define	V_isn_offset_old		VNET(isn_offset_old)
3390 
3391 tcp_seq
3392 tcp_new_isn(struct in_conninfo *inc)
3393 {
3394 	tcp_seq new_isn;
3395 	u_int32_t projected_offset;
3396 
3397 	ISN_LOCK();
3398 	/* Seed if this is the first use, reseed if requested. */
3399 	if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
3400 	     (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
3401 		< (u_int)ticks))) {
3402 		arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0);
3403 		V_isn_last_reseed = ticks;
3404 	}
3405 
3406 	/* Compute the hash and return the ISN. */
3407 	new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret,
3408 	    sizeof(V_isn_secret));
3409 	V_isn_offset += ISN_STATIC_INCREMENT +
3410 		(arc4random() & ISN_RANDOM_INCREMENT);
3411 	if (ticks != V_isn_last) {
3412 		projected_offset = V_isn_offset_old +
3413 		    ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
3414 		if (SEQ_GT(projected_offset, V_isn_offset))
3415 			V_isn_offset = projected_offset;
3416 		V_isn_offset_old = V_isn_offset;
3417 		V_isn_last = ticks;
3418 	}
3419 	new_isn += V_isn_offset;
3420 	ISN_UNLOCK();
3421 	return (new_isn);
3422 }
3423 
3424 /*
3425  * When a specific ICMP unreachable message is received and the
3426  * connection state is SYN-SENT, drop the connection.  This behavior
3427  * is controlled by the icmp_may_rst sysctl.
3428  */
3429 struct inpcb *
3430 tcp_drop_syn_sent(struct inpcb *inp, int errno)
3431 {
3432 	struct tcpcb *tp;
3433 
3434 	NET_EPOCH_ASSERT();
3435 	INP_WLOCK_ASSERT(inp);
3436 
3437 	if ((inp->inp_flags & INP_TIMEWAIT) ||
3438 	    (inp->inp_flags & INP_DROPPED))
3439 		return (inp);
3440 
3441 	tp = intotcpcb(inp);
3442 	if (tp->t_state != TCPS_SYN_SENT)
3443 		return (inp);
3444 
3445 	if (IS_FASTOPEN(tp->t_flags))
3446 		tcp_fastopen_disable_path(tp);
3447 
3448 	tp = tcp_drop(tp, errno);
3449 	if (tp != NULL)
3450 		return (inp);
3451 	else
3452 		return (NULL);
3453 }
3454 
3455 /*
3456  * When `need fragmentation' ICMP is received, update our idea of the MSS
3457  * based on the new value. Also nudge TCP to send something, since we
3458  * know the packet we just sent was dropped.
3459  * This duplicates some code in the tcp_mss() function in tcp_input.c.
3460  */
3461 static struct inpcb *
3462 tcp_mtudisc_notify(struct inpcb *inp, int error)
3463 {
3464 
3465 	tcp_mtudisc(inp, -1);
3466 	return (inp);
3467 }
3468 
3469 static void
3470 tcp_mtudisc(struct inpcb *inp, int mtuoffer)
3471 {
3472 	struct tcpcb *tp;
3473 	struct socket *so;
3474 
3475 	INP_WLOCK_ASSERT(inp);
3476 	if ((inp->inp_flags & INP_TIMEWAIT) ||
3477 	    (inp->inp_flags & INP_DROPPED))
3478 		return;
3479 
3480 	tp = intotcpcb(inp);
3481 	KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
3482 
3483 	tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);
3484 
3485 	so = inp->inp_socket;
3486 	SOCKBUF_LOCK(&so->so_snd);
3487 	/* If the mss is larger than the socket buffer, decrease the mss. */
3488 	if (so->so_snd.sb_hiwat < tp->t_maxseg)
3489 		tp->t_maxseg = so->so_snd.sb_hiwat;
3490 	SOCKBUF_UNLOCK(&so->so_snd);
3491 
3492 	TCPSTAT_INC(tcps_mturesent);
3493 	tp->t_rtttime = 0;
3494 	tp->snd_nxt = tp->snd_una;
3495 	tcp_free_sackholes(tp);
3496 	tp->snd_recover = tp->snd_max;
3497 	if (tp->t_flags & TF_SACK_PERMIT)
3498 		EXIT_FASTRECOVERY(tp->t_flags);
3499 	if (tp->t_fb->tfb_tcp_mtu_chg != NULL) {
3500 		/*
3501 		 * Conceptually the snd_nxt setting
3502 		 * and freeing sack holes should
3503 		 * be done by the default stacks
3504 		 * own tfb_tcp_mtu_chg().
3505 		 */
3506 		tp->t_fb->tfb_tcp_mtu_chg(tp);
3507 	}
3508 	tp->t_fb->tfb_tcp_output(tp);
3509 }
3510 
3511 #ifdef INET
3512 /*
3513  * Look-up the routing entry to the peer of this inpcb.  If no route
3514  * is found and it cannot be allocated, then return 0.  This routine
3515  * is called by TCP routines that access the rmx structure and by
3516  * tcp_mss_update to get the peer/interface MTU.
3517  */
3518 uint32_t
3519 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
3520 {
3521 	struct nhop_object *nh;
3522 	struct ifnet *ifp;
3523 	uint32_t maxmtu = 0;
3524 
3525 	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
3526 
3527 	if (inc->inc_faddr.s_addr != INADDR_ANY) {
3528 		nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0);
3529 		if (nh == NULL)
3530 			return (0);
3531 
3532 		ifp = nh->nh_ifp;
3533 		maxmtu = nh->nh_mtu;
3534 
3535 		/* Report additional interface capabilities. */
3536 		if (cap != NULL) {
3537 			if (ifp->if_capenable & IFCAP_TSO4 &&
3538 			    ifp->if_hwassist & CSUM_TSO) {
3539 				cap->ifcap |= CSUM_TSO;
3540 				cap->tsomax = ifp->if_hw_tsomax;
3541 				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3542 				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3543 			}
3544 		}
3545 	}
3546 	return (maxmtu);
3547 }
3548 #endif /* INET */
3549 
3550 #ifdef INET6
3551 uint32_t
3552 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
3553 {
3554 	struct nhop_object *nh;
3555 	struct in6_addr dst6;
3556 	uint32_t scopeid;
3557 	struct ifnet *ifp;
3558 	uint32_t maxmtu = 0;
3559 
3560 	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
3561 
3562 	if (inc->inc_flags & INC_IPV6MINMTU)
3563 		return (IPV6_MMTU);
3564 
3565 	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
3566 		in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
3567 		nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0);
3568 		if (nh == NULL)
3569 			return (0);
3570 
3571 		ifp = nh->nh_ifp;
3572 		maxmtu = nh->nh_mtu;
3573 
3574 		/* Report additional interface capabilities. */
3575 		if (cap != NULL) {
3576 			if (ifp->if_capenable & IFCAP_TSO6 &&
3577 			    ifp->if_hwassist & CSUM_TSO) {
3578 				cap->ifcap |= CSUM_TSO;
3579 				cap->tsomax = ifp->if_hw_tsomax;
3580 				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3581 				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3582 			}
3583 		}
3584 	}
3585 
3586 	return (maxmtu);
3587 }
3588 
3589 /*
3590  * Handle setsockopt(IPV6_USE_MIN_MTU) by a TCP stack.
3591  *
3592  * XXXGL: we are updating the inpcb here with the INC_IPV6MINMTU flag.
3593  * The right place to do that is ip6_setpktopt(), which has just been
3594  * executed.  It also filled in ip6po_minmtu for us.
3595  */
3596 void
3597 tcp6_use_min_mtu(struct tcpcb *tp)
3598 {
3599 	struct inpcb *inp = tp->t_inpcb;
3600 
3601 	INP_WLOCK_ASSERT(inp);
3602 	/*
3603 	 * In case of the IPV6_USE_MIN_MTU socket
3604 	 * option, set the INC_IPV6MINMTU flag to
3605 	 * announce a corresponding MSS during the
3606 	 * initial handshake.  If the TCP connection
3607 	 * has already left the initial states, just
3608 	 * reduce the MSS being used.  This avoids
3609 	 * sending TCP segments which will be
3610 	 * fragmented at the IPv6 layer.
3611 	 */
3612 	inp->inp_inc.inc_flags |= INC_IPV6MINMTU;
3613 	if ((tp->t_state >= TCPS_SYN_SENT) &&
3614 	    (inp->inp_inc.inc_flags & INC_ISIPV6)) {
3615 		struct ip6_pktopts *opt;
3616 
3617 		opt = inp->in6p_outputopts;
3618 		if (opt != NULL && opt->ip6po_minmtu == IP6PO_MINMTU_ALL &&
3619 		    tp->t_maxseg > TCP6_MSS)
3620 			tp->t_maxseg = TCP6_MSS;
3621 	}
3622 }
3623 #endif /* INET6 */
3624 
3625 /*
3626  * Calculate effective SMSS per RFC5681 definition for a given TCP
3627  * connection at its current state, taking into account options such as SACK.
3628  */
3629 u_int
3630 tcp_maxseg(const struct tcpcb *tp)
3631 {
3632 	u_int optlen;
3633 
3634 	if (tp->t_flags & TF_NOOPT)
3635 		return (tp->t_maxseg);
3636 
3637 	/*
3638 	 * This is simplified code from tcp_addoptions(), without the
3639 	 * proper loop and with most of the padding hardcoded.  We may
3640 	 * miscount the padding in some edge cases, but this is harmless,
3641 	 * since the result of tcp_maxseg() is used only for cwnd and
3642 	 * ssthresh estimation.
3643 	 */
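	/*
	 * Worked example (illustrative): an established connection that
	 * negotiated only timestamps carries TCPOLEN_TSTAMP_APPA (12)
	 * bytes of options per segment, so with a t_maxseg of 1460 the
	 * effective SMSS computed below is 1460 - 12 = 1448.
	 */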
3644 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3645 		if (tp->t_flags & TF_RCVD_TSTMP)
3646 			optlen = TCPOLEN_TSTAMP_APPA;
3647 		else
3648 			optlen = 0;
3649 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3650 		if (tp->t_flags & TF_SIGNATURE)
3651 			optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3652 #endif
3653 		if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
3654 			optlen += TCPOLEN_SACKHDR;
3655 			optlen += tp->rcv_numsacks * TCPOLEN_SACK;
3656 			optlen = PADTCPOLEN(optlen);
3657 		}
3658 	} else {
3659 		if (tp->t_flags & TF_REQ_TSTMP)
3660 			optlen = TCPOLEN_TSTAMP_APPA;
3661 		else
3662 			optlen = PADTCPOLEN(TCPOLEN_MAXSEG);
3663 		if (tp->t_flags & TF_REQ_SCALE)
3664 			optlen += PADTCPOLEN(TCPOLEN_WINDOW);
3665 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3666 		if (tp->t_flags & TF_SIGNATURE)
3667 			optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3668 #endif
3669 		if (tp->t_flags & TF_SACK_PERMIT)
3670 			optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED);
3671 	}
3673 	optlen = min(optlen, TCP_MAXOLEN);
3674 	return (tp->t_maxseg - optlen);
3675 }
3676 
3678 u_int
3679 tcp_fixed_maxseg(const struct tcpcb *tp)
3680 {
3681 	int optlen;
3682 
3683 	if (tp->t_flags & TF_NOOPT)
3684 		return (tp->t_maxseg);
3685 
3686 	/*
3687 	 * This is simplified code from tcp_addoptions(), without the
3688 	 * proper loop and with most of the padding hardcoded.  We only
3689 	 * consider fixed options that we would send on every segment,
3690 	 * i.e. SACK is not considered.  This is important for cc
3691 	 * modules to figure out what the modulo of the cwnd should
3692 	 * be.
3693 	 */
3694 #define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
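	/* E.g. PAD(10) == 12 and PAD(12) == 12: round up to a 4-byte boundary. */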
3695 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3696 		if (tp->t_flags & TF_RCVD_TSTMP)
3697 			optlen = TCPOLEN_TSTAMP_APPA;
3698 		else
3699 			optlen = 0;
3700 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3701 		if (tp->t_flags & TF_SIGNATURE)
3702 			optlen += PAD(TCPOLEN_SIGNATURE);
3703 #endif
3704 	} else {
3705 		if (tp->t_flags & TF_REQ_TSTMP)
3706 			optlen = TCPOLEN_TSTAMP_APPA;
3707 		else
3708 			optlen = PAD(TCPOLEN_MAXSEG);
3709 		if (tp->t_flags & TF_REQ_SCALE)
3710 			optlen += PAD(TCPOLEN_WINDOW);
3711 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3712 		if (tp->t_flags & TF_SIGNATURE)
3713 			optlen += PAD(TCPOLEN_SIGNATURE);
3714 #endif
3715 		if (tp->t_flags & TF_SACK_PERMIT)
3716 			optlen += PAD(TCPOLEN_SACK_PERMITTED);
3717 	}
3718 #undef PAD
3719 	optlen = min(optlen, TCP_MAXOLEN);
3720 	return (tp->t_maxseg - optlen);
3721 }
3722 
3725 static int
3726 sysctl_drop(SYSCTL_HANDLER_ARGS)
3727 {
3728 	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
3729 	struct sockaddr_storage addrs[2];
3730 	struct inpcb *inp;
3731 	struct tcpcb *tp;
3732 	struct tcptw *tw;
3733 	struct sockaddr_in *fin, *lin;
3734 	struct epoch_tracker et;
3735 #ifdef INET6
3736 	struct sockaddr_in6 *fin6, *lin6;
3737 #endif
3738 	int error;
3739 
3740 	inp = NULL;
3741 	fin = lin = NULL;
3742 #ifdef INET6
3743 	fin6 = lin6 = NULL;
3744 #endif
3745 	error = 0;
3746 
3747 	if (req->oldptr != NULL || req->oldlen != 0)
3748 		return (EINVAL);
3749 	if (req->newptr == NULL)
3750 		return (EPERM);
3751 	if (req->newlen < sizeof(addrs))
3752 		return (ENOMEM);
3753 	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3754 	if (error)
3755 		return (error);
3756 
3757 	switch (addrs[0].ss_family) {
3758 #ifdef INET6
3759 	case AF_INET6:
3760 		fin6 = (struct sockaddr_in6 *)&addrs[0];
3761 		lin6 = (struct sockaddr_in6 *)&addrs[1];
3762 		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3763 		    lin6->sin6_len != sizeof(struct sockaddr_in6))
3764 			return (EINVAL);
3765 		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3766 			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3767 				return (EINVAL);
3768 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3769 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3770 			fin = (struct sockaddr_in *)&addrs[0];
3771 			lin = (struct sockaddr_in *)&addrs[1];
3772 			break;
3773 		}
3774 		error = sa6_embedscope(fin6, V_ip6_use_defzone);
3775 		if (error)
3776 			return (error);
3777 		error = sa6_embedscope(lin6, V_ip6_use_defzone);
3778 		if (error)
3779 			return (error);
3780 		break;
3781 #endif
3782 #ifdef INET
3783 	case AF_INET:
3784 		fin = (struct sockaddr_in *)&addrs[0];
3785 		lin = (struct sockaddr_in *)&addrs[1];
3786 		if (fin->sin_len != sizeof(struct sockaddr_in) ||
3787 		    lin->sin_len != sizeof(struct sockaddr_in))
3788 			return (EINVAL);
3789 		break;
3790 #endif
3791 	default:
3792 		return (EINVAL);
3793 	}
3794 	NET_EPOCH_ENTER(et);
3795 	switch (addrs[0].ss_family) {
3796 #ifdef INET6
3797 	case AF_INET6:
3798 		inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3799 		    fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3800 		    INPLOOKUP_WLOCKPCB, NULL);
3801 		break;
3802 #endif
3803 #ifdef INET
3804 	case AF_INET:
3805 		inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3806 		    lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3807 		break;
3808 #endif
3809 	}
3810 	if (inp != NULL) {
3811 		if (inp->inp_flags & INP_TIMEWAIT) {
3812 			/*
3813 			 * XXXRW: There currently exists a state where an
3814 			 * inpcb is present, but its timewait state has been
3815 			 * discarded.  For now, don't allow dropping of this
3816 			 * type of inpcb.
3817 			 */
3818 			tw = intotw(inp);
3819 			if (tw != NULL)
3820 				tcp_twclose(tw, 0);
3821 			else
3822 				INP_WUNLOCK(inp);
3823 		} else if ((inp->inp_flags & INP_DROPPED) == 0 &&
3824 		    !SOLISTENING(inp->inp_socket)) {
3825 			tp = intotcpcb(inp);
3826 			tp = tcp_drop(tp, ECONNABORTED);
3827 			if (tp != NULL)
3828 				INP_WUNLOCK(inp);
3829 		} else
3830 			INP_WUNLOCK(inp);
3831 	} else
3832 		error = ESRCH;
3833 	NET_EPOCH_EXIT(et);
3834 	return (error);
3835 }
3836 
3837 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
3838     CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3839     CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "",
3840     "Drop TCP connection");
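
/*
 * Userland usage sketch (hypothetical, in the spirit of tcpdrop(8)):
 * drop an established IPv4 connection by writing both endpoints to the
 * sysctl above; addrs[0] is the foreign endpoint, addrs[1] the local one.
 *
 *	struct sockaddr_storage addrs[2];
 *	struct sockaddr_in *fin = (struct sockaddr_in *)&addrs[0];
 *	struct sockaddr_in *lin = (struct sockaddr_in *)&addrs[1];
 *
 *	memset(addrs, 0, sizeof(addrs));
 *	fin->sin_family = lin->sin_family = AF_INET;
 *	fin->sin_len = lin->sin_len = sizeof(struct sockaddr_in);
 *	inet_pton(AF_INET, "203.0.113.5", &fin->sin_addr);
 *	fin->sin_port = htons(80);
 *	inet_pton(AF_INET, "192.0.2.1", &lin->sin_addr);
 *	lin->sin_port = htons(50332);
 *	if (sysctlbyname("net.inet.tcp.drop", NULL, NULL, addrs,
 *	    sizeof(addrs)) == -1)
 *		err(1, "net.inet.tcp.drop");
 */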
3841 
3842 #ifdef KERN_TLS
3843 static int
3844 sysctl_switch_tls(SYSCTL_HANDLER_ARGS)
3845 {
3846 	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
3847 	struct sockaddr_storage addrs[2];
3848 	struct inpcb *inp;
3849 	struct sockaddr_in *fin, *lin;
3850 	struct epoch_tracker et;
3851 #ifdef INET6
3852 	struct sockaddr_in6 *fin6, *lin6;
3853 #endif
3854 	int error;
3855 
3856 	inp = NULL;
3857 	fin = lin = NULL;
3858 #ifdef INET6
3859 	fin6 = lin6 = NULL;
3860 #endif
3861 	error = 0;
3862 
3863 	if (req->oldptr != NULL || req->oldlen != 0)
3864 		return (EINVAL);
3865 	if (req->newptr == NULL)
3866 		return (EPERM);
3867 	if (req->newlen < sizeof(addrs))
3868 		return (ENOMEM);
3869 	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3870 	if (error)
3871 		return (error);
3872 
3873 	switch (addrs[0].ss_family) {
3874 #ifdef INET6
3875 	case AF_INET6:
3876 		fin6 = (struct sockaddr_in6 *)&addrs[0];
3877 		lin6 = (struct sockaddr_in6 *)&addrs[1];
3878 		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3879 		    lin6->sin6_len != sizeof(struct sockaddr_in6))
3880 			return (EINVAL);
3881 		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3882 			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3883 				return (EINVAL);
3884 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3885 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3886 			fin = (struct sockaddr_in *)&addrs[0];
3887 			lin = (struct sockaddr_in *)&addrs[1];
3888 			break;
3889 		}
3890 		error = sa6_embedscope(fin6, V_ip6_use_defzone);
3891 		if (error)
3892 			return (error);
3893 		error = sa6_embedscope(lin6, V_ip6_use_defzone);
3894 		if (error)
3895 			return (error);
3896 		break;
3897 #endif
3898 #ifdef INET
3899 	case AF_INET:
3900 		fin = (struct sockaddr_in *)&addrs[0];
3901 		lin = (struct sockaddr_in *)&addrs[1];
3902 		if (fin->sin_len != sizeof(struct sockaddr_in) ||
3903 		    lin->sin_len != sizeof(struct sockaddr_in))
3904 			return (EINVAL);
3905 		break;
3906 #endif
3907 	default:
3908 		return (EINVAL);
3909 	}
3910 	NET_EPOCH_ENTER(et);
3911 	switch (addrs[0].ss_family) {
3912 #ifdef INET6
3913 	case AF_INET6:
3914 		inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3915 		    fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3916 		    INPLOOKUP_WLOCKPCB, NULL);
3917 		break;
3918 #endif
3919 #ifdef INET
3920 	case AF_INET:
3921 		inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3922 		    lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3923 		break;
3924 #endif
3925 	}
3926 	NET_EPOCH_EXIT(et);
3927 	if (inp != NULL) {
3928 		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) != 0 ||
3929 		    inp->inp_socket == NULL) {
3930 			error = ECONNRESET;
3931 			INP_WUNLOCK(inp);
3932 		} else {
3933 			struct socket *so;
3934 
3935 			so = inp->inp_socket;
3936 			soref(so);
3937 			error = ktls_set_tx_mode(so,
3938 			    arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET);
3939 			INP_WUNLOCK(inp);
3940 			sorele(so);
3941 		}
3942 	} else
3943 		error = ESRCH;
3944 	return (error);
3945 }
3946 
3947 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls,
3948     CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3949     CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "",
3950     "Switch TCP connection to SW TLS");
3951 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls,
3952     CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3953     CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "",
3954     "Switch TCP connection to ifnet TLS");
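
/*
 * These two handlers take the same addrs[2] payload as net.inet.tcp.drop
 * above: writing the endpoints to net.inet.tcp.switch_to_sw_tls or
 * net.inet.tcp.switch_to_ifnet_tls moves the connection's TLS transmit
 * processing to software or to the NIC, respectively.
 */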
3955 #endif
3956 
3957 /*
3958  * Generate a standardized TCP log line for use throughout the
3959  * tcp subsystem.  Memory allocation is done with M_NOWAIT to
3960  * allow use in the interrupt context.
3961  *
3962  * NB: The caller MUST free(s, M_TCPLOG) the returned string.
3963  * NB: The function may return NULL if memory allocation failed.
3964  *
3965  * Due to header inclusion and ordering limitations the struct ip
3966  * and ip6_hdr pointers have to be passed as void pointers.
3967  */
3968 char *
3969 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
3970     const void *ip6hdr)
3971 {
3972 
3973 	/* Is logging enabled? */
3974 	if (V_tcp_log_in_vain == 0)
3975 		return (NULL);
3976 
3977 	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3978 }
3979 
3980 char *
3981 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
3982     const void *ip6hdr)
3983 {
3984 
3985 	/* Is logging enabled? */
3986 	if (tcp_log_debug == 0)
3987 		return (NULL);
3988 
3989 	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3990 }
3991 
3992 static char *
3993 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
3994     const void *ip6hdr)
3995 {
3996 	char *s, *sp;
3997 	size_t size;
3998 	struct ip *ip;
3999 #ifdef INET6
4000 	const struct ip6_hdr *ip6;
4001 
4002 	ip6 = (const struct ip6_hdr *)ip6hdr;
4003 #endif /* INET6 */
4004 	ip = (struct ip *)ip4hdr;
4005 
4006 	/*
4007 	 * The log line looks like this:
4008 	 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
4009 	 */
4010 	size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
4011 	    sizeof(PRINT_TH_FLAGS) + 1 +
4012 #ifdef INET6
4013 	    2 * INET6_ADDRSTRLEN;
4014 #else
4015 	    2 * INET_ADDRSTRLEN;
4016 #endif /* INET6 */
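	/*
	 * Illustrative sizing: the literal template above (including its
	 * NUL), plus room for the spelled-out flag names in PRINT_TH_FLAGS,
	 * plus two address strings of up to INET6_ADDRSTRLEN (46) or
	 * INET_ADDRSTRLEN (16) bytes each.
	 */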
4017 
4018 	s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
4019 	if (s == NULL)
4020 		return (NULL);
4021 
4022 	strcat(s, "TCP: [");
4023 	sp = s + strlen(s);
4024 
4025 	if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
4026 		inet_ntoa_r(inc->inc_faddr, sp);
4027 		sp = s + strlen(s);
4028 		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
4029 		sp = s + strlen(s);
4030 		inet_ntoa_r(inc->inc_laddr, sp);
4031 		sp = s + strlen(s);
4032 		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
4033 #ifdef INET6
4034 	} else if (inc) {
4035 		ip6_sprintf(sp, &inc->inc6_faddr);
4036 		sp = s + strlen(s);
4037 		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
4038 		sp = s + strlen(s);
4039 		ip6_sprintf(sp, &inc->inc6_laddr);
4040 		sp = s + strlen(s);
4041 		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
4042 	} else if (ip6 && th) {
4043 		ip6_sprintf(sp, &ip6->ip6_src);
4044 		sp = s + strlen(s);
4045 		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
4046 		sp = s + strlen(s);
4047 		ip6_sprintf(sp, &ip6->ip6_dst);
4048 		sp = s + strlen(s);
4049 		sprintf(sp, "]:%i", ntohs(th->th_dport));
4050 #endif /* INET6 */
4051 #ifdef INET
4052 	} else if (ip && th) {
4053 		inet_ntoa_r(ip->ip_src, sp);
4054 		sp = s + strlen(s);
4055 		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
4056 		sp = s + strlen(s);
4057 		inet_ntoa_r(ip->ip_dst, sp);
4058 		sp = s + strlen(s);
4059 		sprintf(sp, "]:%i", ntohs(th->th_dport));
4060 #endif /* INET */
4061 	} else {
4062 		free(s, M_TCPLOG);
4063 		return (NULL);
4064 	}
4065 	sp = s + strlen(s);
4066 	if (th)
4067 		sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
4068 	if (*(s + size - 1) != '\0')
4069 		panic("%s: string too long", __func__);
4070 	return (s);
4071 }
4072 
4073 /*
4074  * A subroutine which makes it easy to track TCP state changes with DTrace.
4075  * This function shouldn't be called for t_state initializations that don't
4076  * correspond to actual TCP state transitions.
4077  */
4078 void
4079 tcp_state_change(struct tcpcb *tp, int newstate)
4080 {
4081 #if defined(KDTRACE_HOOKS)
4082 	int pstate = tp->t_state;
4083 #endif
4084 
4085 	TCPSTATES_DEC(tp->t_state);
4086 	TCPSTATES_INC(newstate);
4087 	tp->t_state = newstate;
4088 	TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
4089 }
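
/*
 * Illustrative: state transitions recorded here can be observed from
 * userland with, e.g., `dtrace -n 'tcp:::state-change'`.
 */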
4090 
4091 /*
4092  * Create an external-format (``xtcpcb'') structure using the information in
4093  * the kernel-format tcpcb structure pointed to by tp.  This is done to
4094  * reduce the spew of irrelevant information over this interface, to isolate
4095  * user code from changes in the kernel structure, and potentially to provide
4096  * information-hiding if we decide that some of this information should be
4097  * hidden from users.
4098  */
4099 void
4100 tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
4101 {
4102 	struct tcpcb *tp = intotcpcb(inp);
4103 	struct tcptw *tw = intotw(inp);
4104 	sbintime_t now;
4105 
4106 	bzero(xt, sizeof(*xt));
4107 	if (inp->inp_flags & INP_TIMEWAIT) {
4108 		xt->t_state = TCPS_TIME_WAIT;
4109 		xt->xt_encaps_port = tw->t_port;
4110 	} else {
4111 		xt->t_state = tp->t_state;
4112 		xt->t_logstate = tp->t_logstate;
4113 		xt->t_flags = tp->t_flags;
4114 		xt->t_sndzerowin = tp->t_sndzerowin;
4115 		xt->t_sndrexmitpack = tp->t_sndrexmitpack;
4116 		xt->t_rcvoopack = tp->t_rcvoopack;
4117 		xt->t_rcv_wnd = tp->rcv_wnd;
4118 		xt->t_snd_wnd = tp->snd_wnd;
4119 		xt->t_snd_cwnd = tp->snd_cwnd;
4120 		xt->t_snd_ssthresh = tp->snd_ssthresh;
4121 		xt->t_dsack_bytes = tp->t_dsack_bytes;
4122 		xt->t_dsack_tlp_bytes = tp->t_dsack_tlp_bytes;
4123 		xt->t_dsack_pack = tp->t_dsack_pack;
4124 		xt->t_maxseg = tp->t_maxseg;
4125 		xt->xt_ecn = ((tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0) +
4126 			     ((tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0);
4127 
4128 		now = getsbinuptime();
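		/*
		 * Export the remaining time until each timer fires, in
		 * milliseconds, or 0 if that timer is not running.
		 */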
4129 #define	COPYTIMER(ttt)	do {						\
4130 		if (callout_active(&tp->t_timers->ttt))			\
4131 			xt->ttt = (tp->t_timers->ttt.c_time - now) /	\
4132 			    SBT_1MS;					\
4133 		else							\
4134 			xt->ttt = 0;					\
4135 } while (0)
4136 		COPYTIMER(tt_delack);
4137 		COPYTIMER(tt_rexmt);
4138 		COPYTIMER(tt_persist);
4139 		COPYTIMER(tt_keep);
4140 		COPYTIMER(tt_2msl);
4141 #undef COPYTIMER
4142 		xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;
4143 
4144 		xt->xt_encaps_port = tp->t_port;
4145 		bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
4146 		    TCP_FUNCTION_NAME_LEN_MAX);
4147 		bcopy(CC_ALGO(tp)->name, xt->xt_cc,
4148 		    TCP_CA_NAME_MAX);
4149 #ifdef TCP_BLACKBOX
4150 		(void)tcp_log_get_id(tp, xt->xt_logid);
4151 #endif
4152 	}
4153 
4154 	xt->xt_len = sizeof(struct xtcpcb);
4155 	in_pcbtoxinpcb(inp, &xt->xt_inp);
4156 	if (inp->inp_socket == NULL)
4157 		xt->xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
4158 }
4159 
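/*
 * Record a connection end-of-life status code at most once: each distinct
 * status sets its bit in t_end_info_status and occupies the first empty
 * of the TCP_END_BYTE_INFO byte slots.
 */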
4160 void
4161 tcp_log_end_status(struct tcpcb *tp, uint8_t status)
4162 {
4163 	uint32_t bit, i;
4164 
4165 	if ((tp == NULL) ||
4166 	    (status > TCP_EI_STATUS_MAX_VALUE) ||
4167 	    (status == 0)) {
4168 		/* Invalid */
4169 		return;
4170 	}
4171 	if (status > (sizeof(uint32_t) * 8)) {
4172 		/* Should this be a KASSERT? */
4173 		return;
4174 	}
4175 	bit = 1U << (status - 1);
4176 	if (bit & tp->t_end_info_status) {
4177 		/* already logged */
4178 		return;
4179 	}
4180 	for (i = 0; i < TCP_END_BYTE_INFO; i++) {
4181 		if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) {
4182 			tp->t_end_info_bytes[i] = status;
4183 			tp->t_end_info_status |= bit;
4184 			break;
4185 		}
4186 	}
4187 }
4188 
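/*
 * Account for one more pacing connection: return 1 and bump the global
 * count if pacing is unlimited (tcp_pacing_limit == -1) or the count is
 * still below tcp_pacing_limit; otherwise return 0.
 */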
4189 int
4190 tcp_can_enable_pacing(void)
4191 {
4192 
4193 	if ((tcp_pacing_limit == -1) ||
4194 	    (tcp_pacing_limit > number_of_tcp_connections_pacing)) {
4195 		atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1);
4196 		shadow_num_connections = number_of_tcp_connections_pacing;
4197 		return (1);
4198 	} else {
4199 		return (0);
4200 	}
4201 }
4202 
4203 static uint8_t tcp_pacing_warning = 0;
4204 
4205 void
4206 tcp_decrement_paced_conn(void)
4207 {
4208 	uint32_t ret;
4209 
4210 	ret = atomic_fetchadd_int(&number_of_tcp_connections_pacing, -1);
4211 	shadow_num_connections = number_of_tcp_connections_pacing;
4212 	KASSERT(ret != 0, ("tcp_decrement_paced_conn: -1 would cause wrap?"));
4213 	if (ret == 0) {
4214 		if (tcp_pacing_limit != -1) {
4215 			printf("Warning: pacing count decremented below zero, disabling all pacing!\n");
4216 			tcp_pacing_limit = 0;
4217 		} else if (tcp_pacing_warning == 0) {
4218 			printf("Warning: pacing count is invalid, spurious decrement detected\n");
4219 			tcp_pacing_warning = 1;
4220 		}
4221 	}
4222 }
4223