xref: /freebsd/sys/netinet/tcp_usrreq.c (revision b13788e396c2b24f88697e7d4a74bab429ef4d0c)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1993
5  *	The Regents of the University of California.
6  * Copyright (c) 2006-2007 Robert N. M. Watson
7  * Copyright (c) 2010-2011 Juniper Networks, Inc.
8  * All rights reserved.
9  *
10  * Portions of this software were developed by Robert N. M. Watson under
11  * contract to Juniper Networks, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	From: @(#)tcp_usrreq.c	8.2 (Berkeley) 1/3/94
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include "opt_ddb.h"
44 #include "opt_inet.h"
45 #include "opt_inet6.h"
46 #include "opt_ipsec.h"
47 #include "opt_kern_tls.h"
48 #include "opt_tcpdebug.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/arb.h>
53 #include <sys/limits.h>
54 #include <sys/malloc.h>
55 #include <sys/refcount.h>
56 #include <sys/kernel.h>
57 #include <sys/ktls.h>
58 #include <sys/qmath.h>
59 #include <sys/sysctl.h>
60 #include <sys/mbuf.h>
61 #ifdef INET6
62 #include <sys/domain.h>
63 #endif /* INET6 */
64 #include <sys/socket.h>
65 #include <sys/socketvar.h>
66 #include <sys/protosw.h>
67 #include <sys/proc.h>
68 #include <sys/jail.h>
69 #include <sys/syslog.h>
70 #include <sys/stats.h>
71 
72 #ifdef DDB
73 #include <ddb/ddb.h>
74 #endif
75 
76 #include <net/if.h>
77 #include <net/if_var.h>
78 #include <net/route.h>
79 #include <net/vnet.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/in_kdtrace.h>
83 #include <netinet/in_pcb.h>
84 #include <netinet/in_systm.h>
85 #include <netinet/in_var.h>
86 #include <netinet/ip_var.h>
87 #ifdef INET6
88 #include <netinet/ip6.h>
89 #include <netinet6/in6_pcb.h>
90 #include <netinet6/ip6_var.h>
91 #include <netinet6/scope6_var.h>
92 #endif
93 #include <netinet/tcp.h>
94 #include <netinet/tcp_fsm.h>
95 #include <netinet/tcp_seq.h>
96 #include <netinet/tcp_timer.h>
97 #include <netinet/tcp_var.h>
98 #include <netinet/tcp_log_buf.h>
99 #include <netinet/tcpip.h>
100 #include <netinet/cc/cc.h>
101 #include <netinet/tcp_fastopen.h>
102 #include <netinet/tcp_hpts.h>
103 #ifdef TCPPCAP
104 #include <netinet/tcp_pcap.h>
105 #endif
106 #ifdef TCPDEBUG
107 #include <netinet/tcp_debug.h>
108 #endif
109 #ifdef TCP_OFFLOAD
110 #include <netinet/tcp_offload.h>
111 #endif
112 #include <netipsec/ipsec_support.h>
113 
114 #include <vm/vm.h>
115 #include <vm/vm_param.h>
116 #include <vm/pmap.h>
117 #include <vm/vm_extern.h>
118 #include <vm/vm_map.h>
119 #include <vm/vm_page.h>
120 
121 /*
122  * TCP protocol interface to socket abstraction.
123  */
124 #ifdef INET
125 static int	tcp_connect(struct tcpcb *, struct sockaddr *,
126 		    struct thread *td);
127 #endif /* INET */
128 #ifdef INET6
129 static int	tcp6_connect(struct tcpcb *, struct sockaddr *,
130 		    struct thread *td);
131 #endif /* INET6 */
132 static void	tcp_disconnect(struct tcpcb *);
133 static void	tcp_usrclosed(struct tcpcb *);
134 static void	tcp_fill_info(struct tcpcb *, struct tcp_info *);
135 
136 static int	tcp_pru_options_support(struct tcpcb *tp, int flags);
137 
138 #ifdef TCPDEBUG
139 #define	TCPDEBUG0	int ostate = 0
140 #define	TCPDEBUG1()	ostate = tp ? tp->t_state : 0
141 #define	TCPDEBUG2(req)	if (tp && (so->so_options & SO_DEBUG)) \
142 				tcp_trace(TA_USER, ostate, tp, 0, 0, req)
143 #else
144 #define	TCPDEBUG0
145 #define	TCPDEBUG1()
146 #define	TCPDEBUG2(req)
147 #endif
148 
149 /*
150  * TCP attaches to the socket via pru_attach(), reserving socket buffer
151  * space and an internet control block.
152  */
153 static int
154 tcp_usr_attach(struct socket *so, int proto, struct thread *td)
155 {
156 	struct inpcb *inp;
157 	struct tcpcb *tp = NULL;
158 	int error;
159 	TCPDEBUG0;
160 
161 	inp = sotoinpcb(so);
162 	KASSERT(inp == NULL, ("tcp_usr_attach: inp != NULL"));
163 	TCPDEBUG1();
164 
165 	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
166 		error = soreserve(so, V_tcp_sendspace, V_tcp_recvspace);
167 		if (error)
168 			goto out;
169 	}
170 
171 	so->so_rcv.sb_flags |= SB_AUTOSIZE;
172 	so->so_snd.sb_flags |= SB_AUTOSIZE;
173 	error = in_pcballoc(so, &V_tcbinfo);
174 	if (error)
175 		goto out;
176 	inp = sotoinpcb(so);
177 #ifdef INET6
178 	if (inp->inp_vflag & INP_IPV6PROTO) {
179 		inp->inp_vflag |= INP_IPV6;
180 		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
181 			inp->inp_vflag |= INP_IPV4;
182 		inp->in6p_hops = -1;	/* use kernel default */
183 	}
184 	else
185 #endif
186 		inp->inp_vflag |= INP_IPV4;
187 	tp = tcp_newtcpcb(inp);
188 	if (tp == NULL) {
189 		error = ENOBUFS;
190 		in_pcbdetach(inp);
191 		in_pcbfree(inp);
192 		goto out;
193 	}
194 	tp->t_state = TCPS_CLOSED;
195 	INP_WUNLOCK(inp);
196 	TCPSTATES_INC(TCPS_CLOSED);
197 	if ((so->so_options & SO_LINGER) && so->so_linger == 0)
198 		so->so_linger = TCP_LINGERTIME;
199 out:
200 	TCPDEBUG2(PRU_ATTACH);
201 	TCP_PROBE2(debug__user, tp, PRU_ATTACH);
202 	return (error);
203 }
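
/*
 * For illustration, a minimal userland sketch of the attach path above:
 * creating a TCP socket runs pru_attach, which reserves the default
 * send/receive buffer space (V_tcp_sendspace/V_tcp_recvspace, normally
 * exposed as the net.inet.tcp.sendspace/recvspace sysctls) and marks both
 * buffers SB_AUTOSIZE.  Error handling omitted for brevity.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int s, sndbuf;
 *		socklen_t len = sizeof(sndbuf);
 *
 *		s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
 *		getsockopt(s, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len);
 *		printf("default send buffer: %d bytes\n", sndbuf);
 *		return (0);
 *	}
 */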
204 
205 /*
206  * tcp_usr_detach is called when the socket layer loses its final reference
207  * to the socket, be it a file descriptor reference, a reference from TCP,
208  * etc.  At this point, there is only one case in which we will keep around
209  * inpcb state: time wait.
210  */
211 static void
212 tcp_usr_detach(struct socket *so)
213 {
214 	struct inpcb *inp;
215 	struct tcpcb *tp;
216 
217 	inp = sotoinpcb(so);
218 	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
219 	INP_WLOCK(inp);
220 	KASSERT(so->so_pcb == inp && inp->inp_socket == so,
221 		("%s: socket %p inp %p mismatch", __func__, so, inp));
222 
223 	tp = intotcpcb(inp);
224 
225 	if (inp->inp_flags & INP_TIMEWAIT) {
226 		/*
227 		 * There are two cases to handle: one in which the time wait
228 		 * state is being discarded (INP_DROPPED), and one in which
229 		 * this connection will remain in timewait.  In the former,
230 		 * it is time to discard all state (except tcptw, which has
231 		 * already been discarded by the timewait close code, which
232 		 * should be further up the call stack somewhere).  In the
233 		 * latter case, we detach from the socket, but leave the pcb
234 		 * present until timewait ends.
235 		 *
236 		 * XXXRW: Would it be cleaner to free the tcptw here?
237 		 *
238 		 * Astute question indeed, from the tcptw perspective there are
239 		 * four cases to consider:
240 		 *
241 		 * #1 tcp_usr_detach is called at tcptw creation time by
242 		 *  tcp_twstart, then do not discard the newly created tcptw
243 		 *  and leave inpcb present until timewait ends
244 		 * #2 tcp_usr_detach is called at tcptw creation time by
245 		 *  tcp_twstart, but connection is local and tw will be
246 		 *  discarded immediately
247 		 * #3 tcp_usr_detach is called at timewait end (or reuse) by
248 		 *  tcp_twclose, then the tcptw has already been discarded
249 		 *  (or reused) and inpcb is freed here
250 		 * #4 tcp_usr_detach is called after timewait ends (or reuse)
251 		 *  (e.g. by soclose), then tcptw has already been discarded
252 		 *  (or reused) and inpcb is freed here
253 		 *
254 		 *  In all four cases the tcptw should not be freed here.
255 		 */
256 		if (inp->inp_flags & INP_DROPPED) {
257 			in_pcbdetach(inp);
258 			if (__predict_true(tp == NULL)) {
259 				in_pcbfree(inp);
260 			} else {
261 				/*
262 				 * This case should not happen as in TIMEWAIT
263 				 * state the inp should not be destroyed before
264 				 * its tcptw.  If INVARIANTS is defined, panic.
265 				 */
266 #ifdef INVARIANTS
267 				panic("%s: Panic before an inp double-free: "
268 				    "INP_TIMEWAIT && INP_DROPPED && tp != NULL"
269 				    , __func__);
270 #else
271 				log(LOG_ERR, "%s: Avoid an inp double-free: "
272 				    "INP_TIMEWAIT && INP_DROPPED && tp != NULL"
273 				    , __func__);
274 #endif
275 				INP_WUNLOCK(inp);
276 			}
277 		} else {
278 			in_pcbdetach(inp);
279 			INP_WUNLOCK(inp);
280 		}
281 	} else {
282 		/*
283 		 * If the connection is not in timewait, we consider two
284 		 * conditions: one in which no further processing is
285 		 * necessary (dropped || embryonic), and one in which TCP is
286 		 * not yet done, but no longer requires the socket, so the
287 		 * pcb will persist for the time being.
288 		 *
289 		 * XXXRW: Does the second case still occur?
290 		 */
291 		if (inp->inp_flags & INP_DROPPED ||
292 		    tp->t_state < TCPS_SYN_SENT) {
293 			tcp_discardcb(tp);
294 			in_pcbdetach(inp);
295 			in_pcbfree(inp);
296 		} else {
297 			in_pcbdetach(inp);
298 			INP_WUNLOCK(inp);
299 		}
300 	}
301 }
302 
303 #ifdef INET
304 /*
305  * Give the socket an address.
306  */
307 static int
308 tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
309 {
310 	int error = 0;
311 	struct inpcb *inp;
312 	struct tcpcb *tp = NULL;
313 	struct sockaddr_in *sinp;
314 
315 	sinp = (struct sockaddr_in *)nam;
316 	if (nam->sa_len != sizeof (*sinp))
317 		return (EINVAL);
318 	/*
319 	 * Must check for multicast addresses and disallow binding
320 	 * to them.
321 	 */
322 	if (sinp->sin_family == AF_INET &&
323 	    IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
324 		return (EAFNOSUPPORT);
325 
326 	TCPDEBUG0;
327 	inp = sotoinpcb(so);
328 	KASSERT(inp != NULL, ("tcp_usr_bind: inp == NULL"));
329 	INP_WLOCK(inp);
330 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
331 		error = EINVAL;
332 		goto out;
333 	}
334 	tp = intotcpcb(inp);
335 	TCPDEBUG1();
336 	INP_HASH_WLOCK(&V_tcbinfo);
337 	error = in_pcbbind(inp, nam, td->td_ucred);
338 	INP_HASH_WUNLOCK(&V_tcbinfo);
339 out:
340 	TCPDEBUG2(PRU_BIND);
341 	TCP_PROBE2(debug__user, tp, PRU_BIND);
342 	INP_WUNLOCK(inp);
343 
344 	return (error);
345 }
346 #endif /* INET */
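
/*
 * A minimal userland sketch of the bind path above (assumes s is an
 * AF_INET TCP socket and the port is otherwise unused; error handling
 * mostly omitted):
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_port = htons(8080);
 *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1)
 *		err(1, "bind");
 *
 * A multicast sin_addr is rejected with EAFNOSUPPORT and a wrong sa_len
 * with EINVAL, matching the checks at the top of tcp_usr_bind().
 */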
347 
348 #ifdef INET6
349 static int
350 tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
351 {
352 	int error = 0;
353 	struct inpcb *inp;
354 	struct tcpcb *tp = NULL;
355 	struct sockaddr_in6 *sin6;
356 	u_char vflagsav;
357 
358 	sin6 = (struct sockaddr_in6 *)nam;
359 	if (nam->sa_len != sizeof (*sin6))
360 		return (EINVAL);
361 	/*
362 	 * Must check for multicast addresses and disallow binding
363 	 * to them.
364 	 */
365 	if (sin6->sin6_family == AF_INET6 &&
366 	    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
367 		return (EAFNOSUPPORT);
368 
369 	TCPDEBUG0;
370 	inp = sotoinpcb(so);
371 	KASSERT(inp != NULL, ("tcp6_usr_bind: inp == NULL"));
372 	INP_WLOCK(inp);
373 	vflagsav = inp->inp_vflag;
374 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
375 		error = EINVAL;
376 		goto out;
377 	}
378 	tp = intotcpcb(inp);
379 	TCPDEBUG1();
380 	INP_HASH_WLOCK(&V_tcbinfo);
381 	inp->inp_vflag &= ~INP_IPV4;
382 	inp->inp_vflag |= INP_IPV6;
383 #ifdef INET
384 	if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
385 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
386 			inp->inp_vflag |= INP_IPV4;
387 		else if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
388 			struct sockaddr_in sin;
389 
390 			in6_sin6_2_sin(&sin, sin6);
391 			if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
392 				error = EAFNOSUPPORT;
393 				INP_HASH_WUNLOCK(&V_tcbinfo);
394 				goto out;
395 			}
396 			inp->inp_vflag |= INP_IPV4;
397 			inp->inp_vflag &= ~INP_IPV6;
398 			error = in_pcbbind(inp, (struct sockaddr *)&sin,
399 			    td->td_ucred);
400 			INP_HASH_WUNLOCK(&V_tcbinfo);
401 			goto out;
402 		}
403 	}
404 #endif
405 	error = in6_pcbbind(inp, nam, td->td_ucred);
406 	INP_HASH_WUNLOCK(&V_tcbinfo);
407 out:
408 	if (error != 0)
409 		inp->inp_vflag = vflagsav;
410 	TCPDEBUG2(PRU_BIND);
411 	TCP_PROBE2(debug__user, tp, PRU_BIND);
412 	INP_WUNLOCK(inp);
413 	return (error);
414 }
415 #endif /* INET6 */
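
/*
 * Sketch of the v4-mapped case handled above: on a dual-stack AF_INET6
 * socket (IPV6_V6ONLY off), binding to ::ffff:a.b.c.d is converted with
 * in6_sin6_2_sin() and handed to in_pcbbind() as a plain IPv4 bind.
 * Userland side, illustration only (assumes s6 is an AF_INET6 TCP socket):
 *
 *	struct sockaddr_in6 sin6;
 *	int off = 0;
 *
 *	setsockopt(s6, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
 *	memset(&sin6, 0, sizeof(sin6));
 *	sin6.sin6_family = AF_INET6;
 *	sin6.sin6_len = sizeof(sin6);
 *	sin6.sin6_port = htons(8080);
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr);
 *	bind(s6, (struct sockaddr *)&sin6, sizeof(sin6));
 */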
416 
417 #ifdef INET
418 /*
419  * Prepare to accept connections.
420  */
421 static int
422 tcp_usr_listen(struct socket *so, int backlog, struct thread *td)
423 {
424 	int error = 0;
425 	struct inpcb *inp;
426 	struct tcpcb *tp = NULL;
427 
428 	TCPDEBUG0;
429 	inp = sotoinpcb(so);
430 	KASSERT(inp != NULL, ("tcp_usr_listen: inp == NULL"));
431 	INP_WLOCK(inp);
432 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
433 		error = EINVAL;
434 		goto out;
435 	}
436 	tp = intotcpcb(inp);
437 	TCPDEBUG1();
438 	SOCK_LOCK(so);
439 	error = solisten_proto_check(so);
440 	INP_HASH_WLOCK(&V_tcbinfo);
441 	if (error == 0 && inp->inp_lport == 0)
442 		error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
443 	INP_HASH_WUNLOCK(&V_tcbinfo);
444 	if (error == 0) {
445 		tcp_state_change(tp, TCPS_LISTEN);
446 		solisten_proto(so, backlog);
447 #ifdef TCP_OFFLOAD
448 		if ((so->so_options & SO_NO_OFFLOAD) == 0)
449 			tcp_offload_listen_start(tp);
450 #endif
451 	}
452 	SOCK_UNLOCK(so);
453 
454 	if (IS_FASTOPEN(tp->t_flags))
455 		tp->t_tfo_pending = tcp_fastopen_alloc_counter();
456 
457 out:
458 	TCPDEBUG2(PRU_LISTEN);
459 	TCP_PROBE2(debug__user, tp, PRU_LISTEN);
460 	INP_WUNLOCK(inp);
461 	return (error);
462 }
463 #endif /* INET */
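
/*
 * Userland sketch of the listen path above.  If TCP_FASTOPEN was enabled on
 * the socket beforehand (and the net.inet.tcp.fastopen.* sysctls permit
 * server-side TFO -- an assumption about system configuration), the
 * IS_FASTOPEN() branch allocates the pending-connection counter when
 * listen(2) is called; the backlog (128 here) is passed to solisten_proto().
 *
 *	int one = 1;
 *
 *	setsockopt(s, IPPROTO_TCP, TCP_FASTOPEN, &one, sizeof(one));
 *	listen(s, 128);
 */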
464 
465 #ifdef INET6
466 static int
467 tcp6_usr_listen(struct socket *so, int backlog, struct thread *td)
468 {
469 	int error = 0;
470 	struct inpcb *inp;
471 	struct tcpcb *tp = NULL;
472 	u_char vflagsav;
473 
474 	TCPDEBUG0;
475 	inp = sotoinpcb(so);
476 	KASSERT(inp != NULL, ("tcp6_usr_listen: inp == NULL"));
477 	INP_WLOCK(inp);
478 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
479 		error = EINVAL;
480 		goto out;
481 	}
482 	vflagsav = inp->inp_vflag;
483 	tp = intotcpcb(inp);
484 	TCPDEBUG1();
485 	SOCK_LOCK(so);
486 	error = solisten_proto_check(so);
487 	INP_HASH_WLOCK(&V_tcbinfo);
488 	if (error == 0 && inp->inp_lport == 0) {
489 		inp->inp_vflag &= ~INP_IPV4;
490 		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
491 			inp->inp_vflag |= INP_IPV4;
492 		error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
493 	}
494 	INP_HASH_WUNLOCK(&V_tcbinfo);
495 	if (error == 0) {
496 		tcp_state_change(tp, TCPS_LISTEN);
497 		solisten_proto(so, backlog);
498 #ifdef TCP_OFFLOAD
499 		if ((so->so_options & SO_NO_OFFLOAD) == 0)
500 			tcp_offload_listen_start(tp);
501 #endif
502 	}
503 	SOCK_UNLOCK(so);
504 
505 	if (IS_FASTOPEN(tp->t_flags))
506 		tp->t_tfo_pending = tcp_fastopen_alloc_counter();
507 
508 	if (error != 0)
509 		inp->inp_vflag = vflagsav;
510 
511 out:
512 	TCPDEBUG2(PRU_LISTEN);
513 	TCP_PROBE2(debug__user, tp, PRU_LISTEN);
514 	INP_WUNLOCK(inp);
515 	return (error);
516 }
517 #endif /* INET6 */
518 
519 #ifdef INET
520 /*
521  * Initiate connection to peer.
522  * Create a template for use in transmissions on this connection.
523  * Enter SYN_SENT state, and mark socket as connecting.
524  * Start keep-alive timer, and seed output sequence space.
525  * Send initial segment on connection.
526  */
527 static int
528 tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
529 {
530 	struct epoch_tracker et;
531 	int error = 0;
532 	struct inpcb *inp;
533 	struct tcpcb *tp = NULL;
534 	struct sockaddr_in *sinp;
535 
536 	sinp = (struct sockaddr_in *)nam;
537 	if (nam->sa_len != sizeof (*sinp))
538 		return (EINVAL);
539 	/*
540 	 * Must disallow TCP ``connections'' to multicast addresses.
541 	 */
542 	if (sinp->sin_family == AF_INET
543 	    && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
544 		return (EAFNOSUPPORT);
545 	if ((error = prison_remote_ip4(td->td_ucred, &sinp->sin_addr)) != 0)
546 		return (error);
547 
548 	TCPDEBUG0;
549 	inp = sotoinpcb(so);
550 	KASSERT(inp != NULL, ("tcp_usr_connect: inp == NULL"));
551 	INP_WLOCK(inp);
552 	if (inp->inp_flags & INP_TIMEWAIT) {
553 		error = EADDRINUSE;
554 		goto out;
555 	}
556 	if (inp->inp_flags & INP_DROPPED) {
557 		error = ECONNREFUSED;
558 		goto out;
559 	}
560 	tp = intotcpcb(inp);
561 	TCPDEBUG1();
562 	NET_EPOCH_ENTER(et);
563 	if ((error = tcp_connect(tp, nam, td)) != 0)
564 		goto out_in_epoch;
565 #ifdef TCP_OFFLOAD
566 	if (registered_toedevs > 0 &&
567 	    (so->so_options & SO_NO_OFFLOAD) == 0 &&
568 	    (error = tcp_offload_connect(so, nam)) == 0)
569 		goto out_in_epoch;
570 #endif
571 	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
572 	error = tp->t_fb->tfb_tcp_output(tp);
573 out_in_epoch:
574 	NET_EPOCH_EXIT(et);
575 out:
576 	TCPDEBUG2(PRU_CONNECT);
577 	TCP_PROBE2(debug__user, tp, PRU_CONNECT);
578 	INP_WUNLOCK(inp);
579 	return (error);
580 }
581 #endif /* INET */
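
/*
 * Userland sketch of the connect path above: tcp_connect() moves the pcb to
 * SYN_SENT and tfb_tcp_output() emits the initial SYN, so a non-blocking
 * connect(2) returns EINPROGRESS and completion is observed when the socket
 * becomes writable (illustration only; assumes s and a filled-in sin):
 *
 *	fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK);
 *	if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) == -1 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = s, .events = POLLOUT };
 *		int soerr;
 *		socklen_t len = sizeof(soerr);
 *
 *		poll(&pfd, 1, -1);
 *		getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len);
 *	}
 */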
582 
583 #ifdef INET6
584 static int
585 tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
586 {
587 	struct epoch_tracker et;
588 	int error = 0;
589 	struct inpcb *inp;
590 	struct tcpcb *tp = NULL;
591 	struct sockaddr_in6 *sin6;
592 	u_int8_t incflagsav;
593 	u_char vflagsav;
594 
595 	TCPDEBUG0;
596 
597 	sin6 = (struct sockaddr_in6 *)nam;
598 	if (nam->sa_len != sizeof (*sin6))
599 		return (EINVAL);
600 	/*
601 	 * Must disallow TCP ``connections'' to multicast addresses.
602 	 */
603 	if (sin6->sin6_family == AF_INET6
604 	    && IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
605 		return (EAFNOSUPPORT);
606 
607 	inp = sotoinpcb(so);
608 	KASSERT(inp != NULL, ("tcp6_usr_connect: inp == NULL"));
609 	INP_WLOCK(inp);
610 	vflagsav = inp->inp_vflag;
611 	incflagsav = inp->inp_inc.inc_flags;
612 	if (inp->inp_flags & INP_TIMEWAIT) {
613 		error = EADDRINUSE;
614 		goto out;
615 	}
616 	if (inp->inp_flags & INP_DROPPED) {
617 		error = ECONNREFUSED;
618 		goto out;
619 	}
620 	tp = intotcpcb(inp);
621 	TCPDEBUG1();
622 #ifdef INET
623 	/*
624 	 * XXXRW: Some confusion: V4/V6 flags relate to binding, and
625 	 * therefore probably require the hash lock, which isn't held here.
626 	 * Is this a significant problem?
627 	 */
628 	if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
629 		struct sockaddr_in sin;
630 
631 		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
632 			error = EINVAL;
633 			goto out;
634 		}
635 		if ((inp->inp_vflag & INP_IPV4) == 0) {
636 			error = EAFNOSUPPORT;
637 			goto out;
638 		}
639 
640 		in6_sin6_2_sin(&sin, sin6);
641 		if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
642 			error = EAFNOSUPPORT;
643 			goto out;
644 		}
645 		if ((error = prison_remote_ip4(td->td_ucred,
646 		    &sin.sin_addr)) != 0)
647 			goto out;
648 		inp->inp_vflag |= INP_IPV4;
649 		inp->inp_vflag &= ~INP_IPV6;
650 		NET_EPOCH_ENTER(et);
651 		if ((error = tcp_connect(tp, (struct sockaddr *)&sin, td)) != 0)
652 			goto out_in_epoch;
653 #ifdef TCP_OFFLOAD
654 		if (registered_toedevs > 0 &&
655 		    (so->so_options & SO_NO_OFFLOAD) == 0 &&
656 		    (error = tcp_offload_connect(so, nam)) == 0)
657 			goto out_in_epoch;
658 #endif
659 		error = tp->t_fb->tfb_tcp_output(tp);
660 		goto out_in_epoch;
661 	} else {
662 		if ((inp->inp_vflag & INP_IPV6) == 0) {
663 			error = EAFNOSUPPORT;
664 			goto out;
665 		}
666 	}
667 #endif
668 	if ((error = prison_remote_ip6(td->td_ucred, &sin6->sin6_addr)) != 0)
669 		goto out;
670 	inp->inp_vflag &= ~INP_IPV4;
671 	inp->inp_vflag |= INP_IPV6;
672 	inp->inp_inc.inc_flags |= INC_ISIPV6;
673 	if ((error = tcp6_connect(tp, nam, td)) != 0)
674 		goto out;
675 #ifdef TCP_OFFLOAD
676 	if (registered_toedevs > 0 &&
677 	    (so->so_options & SO_NO_OFFLOAD) == 0 &&
678 	    (error = tcp_offload_connect(so, nam)) == 0)
679 		goto out;
680 #endif
681 	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
682 	NET_EPOCH_ENTER(et);
683 	error = tp->t_fb->tfb_tcp_output(tp);
684 #ifdef INET
685 out_in_epoch:
686 #endif
687 	NET_EPOCH_EXIT(et);
688 out:
689 	/*
690 	 * If the implicit bind in the connect call fails, restore
691 	 * the flags we modified.
692 	 */
693 	if (error != 0 && inp->inp_lport == 0) {
694 		inp->inp_vflag = vflagsav;
695 		inp->inp_inc.inc_flags = incflagsav;
696 	}
697 
698 	TCPDEBUG2(PRU_CONNECT);
699 	TCP_PROBE2(debug__user, tp, PRU_CONNECT);
700 	INP_WUNLOCK(inp);
701 	return (error);
702 }
703 #endif /* INET6 */
704 
705 /*
706  * Initiate disconnect from peer.
707  * If connection never passed embryonic stage, just drop;
708  * else if we don't need to let data drain, then we can just drop anyway,
709  * else have to begin TCP shutdown process: mark socket disconnecting,
710  * drain unread data, state switch to reflect user close, and
711  * send segment (e.g. FIN) to peer.  Socket will be really disconnected
712  * when peer sends FIN and acks ours.
713  *
714  * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
715  */
716 static int
717 tcp_usr_disconnect(struct socket *so)
718 {
719 	struct inpcb *inp;
720 	struct tcpcb *tp = NULL;
721 	struct epoch_tracker et;
722 	int error = 0;
723 
724 	TCPDEBUG0;
725 	NET_EPOCH_ENTER(et);
726 	inp = sotoinpcb(so);
727 	KASSERT(inp != NULL, ("tcp_usr_disconnect: inp == NULL"));
728 	INP_WLOCK(inp);
729 	if (inp->inp_flags & INP_TIMEWAIT)
730 		goto out;
731 	if (inp->inp_flags & INP_DROPPED) {
732 		error = ECONNRESET;
733 		goto out;
734 	}
735 	tp = intotcpcb(inp);
736 	TCPDEBUG1();
737 	tcp_disconnect(tp);
738 out:
739 	TCPDEBUG2(PRU_DISCONNECT);
740 	TCP_PROBE2(debug__user, tp, PRU_DISCONNECT);
741 	INP_WUNLOCK(inp);
742 	NET_EPOCH_EXIT(et);
743 	return (error);
744 }
745 
746 #ifdef INET
747 /*
748  * Accept a connection.  Essentially all the work is done at higher levels;
749  * just return the address of the peer, storing through addr.
750  */
751 static int
752 tcp_usr_accept(struct socket *so, struct sockaddr **nam)
753 {
754 	int error = 0;
755 	struct inpcb *inp = NULL;
756 	struct tcpcb *tp = NULL;
757 	struct in_addr addr;
758 	in_port_t port = 0;
759 	TCPDEBUG0;
760 
761 	if (so->so_state & SS_ISDISCONNECTED)
762 		return (ECONNABORTED);
763 
764 	inp = sotoinpcb(so);
765 	KASSERT(inp != NULL, ("tcp_usr_accept: inp == NULL"));
766 	INP_WLOCK(inp);
767 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
768 		error = ECONNABORTED;
769 		goto out;
770 	}
771 	tp = intotcpcb(inp);
772 	TCPDEBUG1();
773 
774 	/*
775 	 * We inline in_getpeeraddr and COMMON_END here, so that we can
776 	 * copy the data of interest and defer the malloc until after we
777 	 * release the lock.
778 	 */
779 	port = inp->inp_fport;
780 	addr = inp->inp_faddr;
781 
782 out:
783 	TCPDEBUG2(PRU_ACCEPT);
784 	TCP_PROBE2(debug__user, tp, PRU_ACCEPT);
785 	INP_WUNLOCK(inp);
786 	if (error == 0)
787 		*nam = in_sockaddr(port, &addr);
788 	return error;
789 }
790 #endif /* INET */
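
/*
 * The peer address cached in the inpcb (inp_faddr/inp_fport, copied above
 * while the lock is held) is what accept(2) ultimately hands back to
 * userland, e.g. (illustration only):
 *
 *	struct sockaddr_in peer;
 *	socklen_t len = sizeof(peer);
 *	int c;
 *
 *	c = accept(s, (struct sockaddr *)&peer, &len);
 *	if (c != -1)
 *		printf("connection from %s:%d\n",
 *		    inet_ntoa(peer.sin_addr), ntohs(peer.sin_port));
 *
 * A disconnected or dropped socket yields ECONNABORTED, as in the checks
 * above.
 */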
791 
792 #ifdef INET6
793 static int
794 tcp6_usr_accept(struct socket *so, struct sockaddr **nam)
795 {
796 	struct inpcb *inp = NULL;
797 	int error = 0;
798 	struct tcpcb *tp = NULL;
799 	struct in_addr addr;
800 	struct in6_addr addr6;
801 	struct epoch_tracker et;
802 	in_port_t port = 0;
803 	int v4 = 0;
804 	TCPDEBUG0;
805 
806 	if (so->so_state & SS_ISDISCONNECTED)
807 		return (ECONNABORTED);
808 
809 	inp = sotoinpcb(so);
810 	KASSERT(inp != NULL, ("tcp6_usr_accept: inp == NULL"));
811 	NET_EPOCH_ENTER(et);
812 	INP_WLOCK(inp);
813 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
814 		error = ECONNABORTED;
815 		goto out;
816 	}
817 	tp = intotcpcb(inp);
818 	TCPDEBUG1();
819 
820 	/*
821 	 * We inline in6_mapped_peeraddr and COMMON_END here, so that we can
822 	 * copy the data of interest and defer the malloc until after we
823 	 * release the lock.
824 	 */
825 	if (inp->inp_vflag & INP_IPV4) {
826 		v4 = 1;
827 		port = inp->inp_fport;
828 		addr = inp->inp_faddr;
829 	} else {
830 		port = inp->inp_fport;
831 		addr6 = inp->in6p_faddr;
832 	}
833 
834 out:
835 	TCPDEBUG2(PRU_ACCEPT);
836 	TCP_PROBE2(debug__user, tp, PRU_ACCEPT);
837 	INP_WUNLOCK(inp);
838 	NET_EPOCH_EXIT(et);
839 	if (error == 0) {
840 		if (v4)
841 			*nam = in6_v4mapsin6_sockaddr(port, &addr);
842 		else
843 			*nam = in6_sockaddr(port, &addr6);
844 	}
845 	return error;
846 }
847 #endif /* INET6 */
848 
849 /*
850  * Mark the connection as being incapable of further output.
851  */
852 static int
853 tcp_usr_shutdown(struct socket *so)
854 {
855 	int error = 0;
856 	struct inpcb *inp;
857 	struct tcpcb *tp = NULL;
858 	struct epoch_tracker et;
859 
860 	TCPDEBUG0;
861 	NET_EPOCH_ENTER(et);
862 	inp = sotoinpcb(so);
863 	KASSERT(inp != NULL, ("inp == NULL"));
864 	INP_WLOCK(inp);
865 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
866 		error = ECONNRESET;
867 		goto out;
868 	}
869 	tp = intotcpcb(inp);
870 	TCPDEBUG1();
871 	socantsendmore(so);
872 	tcp_usrclosed(tp);
873 	if (!(inp->inp_flags & INP_DROPPED))
874 		error = tp->t_fb->tfb_tcp_output(tp);
875 
876 out:
877 	TCPDEBUG2(PRU_SHUTDOWN);
878 	TCP_PROBE2(debug__user, tp, PRU_SHUTDOWN);
879 	INP_WUNLOCK(inp);
880 	NET_EPOCH_EXIT(et);
881 
882 	return (error);
883 }
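
/*
 * Userland sketch of the half-close implemented above: shutdown(SHUT_WR)
 * leads to socantsendmore()/tcp_usrclosed(), which queues our FIN, while
 * the receive side stays usable until the peer closes (read() returning 0
 * signals the peer's FIN).  Illustration only:
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	shutdown(s, SHUT_WR);
 *	while ((n = read(s, buf, sizeof(buf))) > 0)
 *		;
 *	close(s);
 */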
884 
885 /*
886  * After a receive, possibly send window update to peer.
887  */
888 static int
889 tcp_usr_rcvd(struct socket *so, int flags)
890 {
891 	struct epoch_tracker et;
892 	struct inpcb *inp;
893 	struct tcpcb *tp = NULL;
894 	int error = 0;
895 
896 	TCPDEBUG0;
897 	inp = sotoinpcb(so);
898 	KASSERT(inp != NULL, ("tcp_usr_rcvd: inp == NULL"));
899 	INP_WLOCK(inp);
900 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
901 		error = ECONNRESET;
902 		goto out;
903 	}
904 	tp = intotcpcb(inp);
905 	TCPDEBUG1();
906 	/*
907 	 * For passively-created TFO connections, don't attempt a window
908 	 * update while still in SYN_RECEIVED as this may trigger an early
909 	 * SYN|ACK.  It is preferable to have the SYN|ACK be sent along with
910 	 * application response data, or failing that, when the DELACK timer
911 	 * expires.
912 	 */
913 	if (IS_FASTOPEN(tp->t_flags) &&
914 	    (tp->t_state == TCPS_SYN_RECEIVED))
915 		goto out;
916 	NET_EPOCH_ENTER(et);
917 #ifdef TCP_OFFLOAD
918 	if (tp->t_flags & TF_TOE)
919 		tcp_offload_rcvd(tp);
920 	else
921 #endif
922 	tp->t_fb->tfb_tcp_output(tp);
923 	NET_EPOCH_EXIT(et);
924 out:
925 	TCPDEBUG2(PRU_RCVD);
926 	TCP_PROBE2(debug__user, tp, PRU_RCVD);
927 	INP_WUNLOCK(inp);
928 	return (error);
929 }
930 
931 /*
932  * Do a send by putting data in output queue and updating urgent
933  * marker if URG set.  Possibly send more data.  Unlike the other
934  * pru_*() routines, the mbuf chains are our responsibility.  We
935  * must either enqueue them or free them.  The other pru_* routines
936  * generally are caller-frees.
937  */
938 static int
939 tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
940     struct sockaddr *nam, struct mbuf *control, struct thread *td)
941 {
942 	struct epoch_tracker et;
943 	int error = 0;
944 	struct inpcb *inp;
945 	struct tcpcb *tp = NULL;
946 #ifdef INET
947 #ifdef INET6
948 	struct sockaddr_in sin;
949 #endif
950 	struct sockaddr_in *sinp;
951 #endif
952 #ifdef INET6
953 	int isipv6;
954 #endif
955 	u_int8_t incflagsav;
956 	u_char vflagsav;
957 	bool restoreflags;
958 	TCPDEBUG0;
959 
960 	/*
961 	 * We require the pcbinfo "read lock" if we will close the socket
962 	 * as part of this call.
963 	 */
964 	NET_EPOCH_ENTER(et);
965 	inp = sotoinpcb(so);
966 	KASSERT(inp != NULL, ("tcp_usr_send: inp == NULL"));
967 	INP_WLOCK(inp);
968 	vflagsav = inp->inp_vflag;
969 	incflagsav = inp->inp_inc.inc_flags;
970 	restoreflags = false;
971 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
972 		if (control)
973 			m_freem(control);
974 		/*
975 		 * In case of PRUS_NOTREADY, tcp_usr_ready() is responsible
976 		 * for freeing memory.
977 		 */
978 		if (m && (flags & PRUS_NOTREADY) == 0)
979 			m_freem(m);
980 		error = ECONNRESET;
981 		goto out;
982 	}
983 	tp = intotcpcb(inp);
984 	if (flags & PRUS_OOB) {
985 		if ((error = tcp_pru_options_support(tp, PRUS_OOB)) != 0) {
986 			if (control)
987 				m_freem(control);
988 			if (m && (flags & PRUS_NOTREADY) == 0)
989 				m_freem(m);
990 			goto out;
991 		}
992 	}
993 	TCPDEBUG1();
994 	if (nam != NULL && tp->t_state < TCPS_SYN_SENT) {
995 		switch (nam->sa_family) {
996 #ifdef INET
997 		case AF_INET:
998 			sinp = (struct sockaddr_in *)nam;
999 			if (sinp->sin_len != sizeof(struct sockaddr_in)) {
1000 				if (m)
1001 					m_freem(m);
1002 				error = EINVAL;
1003 				goto out;
1004 			}
1005 			if ((inp->inp_vflag & INP_IPV6) != 0) {
1006 				if (m)
1007 					m_freem(m);
1008 				error = EAFNOSUPPORT;
1009 				goto out;
1010 			}
1011 			if (IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
1012 				if (m)
1013 					m_freem(m);
1014 				error = EAFNOSUPPORT;
1015 				goto out;
1016 			}
1017 			if ((error = prison_remote_ip4(td->td_ucred,
1018 			    &sinp->sin_addr))) {
1019 				if (m)
1020 					m_freem(m);
1021 				goto out;
1022 			}
1023 #ifdef INET6
1024 			isipv6 = 0;
1025 #endif
1026 			break;
1027 #endif /* INET */
1028 #ifdef INET6
1029 		case AF_INET6:
1030 		{
1031 			struct sockaddr_in6 *sin6;
1032 
1033 			sin6 = (struct sockaddr_in6 *)nam;
1034 			if (sin6->sin6_len != sizeof(*sin6)) {
1035 				if (m)
1036 					m_freem(m);
1037 				error = EINVAL;
1038 				goto out;
1039 			}
1040 			if ((inp->inp_vflag & INP_IPV6PROTO) == 0) {
1041 				if (m != NULL)
1042 					m_freem(m);
1043 				error = EAFNOSUPPORT;
1044 				goto out;
1045 			}
1046 			if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
1047 				if (m)
1048 					m_freem(m);
1049 				error = EAFNOSUPPORT;
1050 				goto out;
1051 			}
1052 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
1053 #ifdef INET
1054 				if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
1055 					error = EINVAL;
1056 					if (m)
1057 						m_freem(m);
1058 					goto out;
1059 				}
1060 				if ((inp->inp_vflag & INP_IPV4) == 0) {
1061 					error = EAFNOSUPPORT;
1062 					if (m)
1063 						m_freem(m);
1064 					goto out;
1065 				}
1066 				restoreflags = true;
1067 				inp->inp_vflag &= ~INP_IPV6;
1068 				sinp = &sin;
1069 				in6_sin6_2_sin(sinp, sin6);
1070 				if (IN_MULTICAST(
1071 				    ntohl(sinp->sin_addr.s_addr))) {
1072 					error = EAFNOSUPPORT;
1073 					if (m)
1074 						m_freem(m);
1075 					goto out;
1076 				}
1077 				if ((error = prison_remote_ip4(td->td_ucred,
1078 				    &sinp->sin_addr))) {
1079 					if (m)
1080 						m_freem(m);
1081 					goto out;
1082 				}
1083 				isipv6 = 0;
1084 #else /* !INET */
1085 				error = EAFNOSUPPORT;
1086 				if (m)
1087 					m_freem(m);
1088 				goto out;
1089 #endif /* INET */
1090 			} else {
1091 				if ((inp->inp_vflag & INP_IPV6) == 0) {
1092 					if (m)
1093 						m_freem(m);
1094 					error = EAFNOSUPPORT;
1095 					goto out;
1096 				}
1097 				restoreflags = true;
1098 				inp->inp_vflag &= ~INP_IPV4;
1099 				inp->inp_inc.inc_flags |= INC_ISIPV6;
1100 				if ((error = prison_remote_ip6(td->td_ucred,
1101 				    &sin6->sin6_addr))) {
1102 					if (m)
1103 						m_freem(m);
1104 					goto out;
1105 				}
1106 				isipv6 = 1;
1107 			}
1108 			break;
1109 		}
1110 #endif /* INET6 */
1111 		default:
1112 			if (m)
1113 				m_freem(m);
1114 			error = EAFNOSUPPORT;
1115 			goto out;
1116 		}
1117 	}
1118 	if (control) {
1119 		/* TCP doesn't do control messages (rights, creds, etc) */
1120 		if (control->m_len) {
1121 			m_freem(control);
1122 			if (m)
1123 				m_freem(m);
1124 			error = EINVAL;
1125 			goto out;
1126 		}
1127 		m_freem(control);	/* empty control, just free it */
1128 	}
1129 	if (!(flags & PRUS_OOB)) {
1130 		sbappendstream(&so->so_snd, m, flags);
1131 		if (nam && tp->t_state < TCPS_SYN_SENT) {
1132 			/*
1133 			 * Do implied connect if not yet connected,
1134 			 * initialize window to default value, and
1135 			 * initialize maxseg using peer's cached MSS.
1136 			 */
1137 #ifdef INET6
1138 			if (isipv6)
1139 				error = tcp6_connect(tp, nam, td);
1140 #endif /* INET6 */
1141 #if defined(INET6) && defined(INET)
1142 			else
1143 #endif
1144 #ifdef INET
1145 				error = tcp_connect(tp,
1146 				    (struct sockaddr *)sinp, td);
1147 #endif
1148 			/*
1149 			 * The bind operation in tcp_connect succeeded. We
1150 			 * no longer want to restore the flags if later
1151 			 * operations fail.
1152 			 */
1153 			if (error == 0 || inp->inp_lport != 0)
1154 				restoreflags = false;
1155 
1156 			if (error)
1157 				goto out;
1158 			if (IS_FASTOPEN(tp->t_flags))
1159 				tcp_fastopen_connect(tp);
1160 			else {
1161 				tp->snd_wnd = TTCP_CLIENT_SND_WND;
1162 				tcp_mss(tp, -1);
1163 			}
1164 		}
1165 		if (flags & PRUS_EOF) {
1166 			/*
1167 			 * Close the send side of the connection after
1168 			 * the data is sent.
1169 			 */
1170 			socantsendmore(so);
1171 			tcp_usrclosed(tp);
1172 		}
1173 		if (!(inp->inp_flags & INP_DROPPED) &&
1174 		    !(flags & PRUS_NOTREADY)) {
1175 			if (flags & PRUS_MORETOCOME)
1176 				tp->t_flags |= TF_MORETOCOME;
1177 			error = tp->t_fb->tfb_tcp_output(tp);
1178 			if (flags & PRUS_MORETOCOME)
1179 				tp->t_flags &= ~TF_MORETOCOME;
1180 		}
1181 	} else {
1182 		/*
1183 		 * XXXRW: PRUS_EOF not implemented with PRUS_OOB?
1184 		 */
1185 		SOCKBUF_LOCK(&so->so_snd);
1186 		if (sbspace(&so->so_snd) < -512) {
1187 			SOCKBUF_UNLOCK(&so->so_snd);
1188 			m_freem(m);
1189 			error = ENOBUFS;
1190 			goto out;
1191 		}
1192 		/*
1193 		 * According to RFC961 (Assigned Protocols),
1194 		 * the urgent pointer points to the last octet
1195 		 * of urgent data.  We continue, however,
1196 		 * to consider it to indicate the first octet
1197 		 * of data past the urgent section.
1198 		 * Otherwise, snd_up should be one lower.
1199 		 */
1200 		sbappendstream_locked(&so->so_snd, m, flags);
1201 		SOCKBUF_UNLOCK(&so->so_snd);
1202 		if (nam && tp->t_state < TCPS_SYN_SENT) {
1203 			/*
1204 			 * Do implied connect if not yet connected,
1205 			 * initialize window to default value, and
1206 			 * initialize maxseg using peer's cached MSS.
1207 			 */
1208 
1209 			/*
1210 			 * Not going to contemplate SYN|URG
1211 			 */
1212 			if (IS_FASTOPEN(tp->t_flags))
1213 				tp->t_flags &= ~TF_FASTOPEN;
1214 #ifdef INET6
1215 			if (isipv6)
1216 				error = tcp6_connect(tp, nam, td);
1217 #endif /* INET6 */
1218 #if defined(INET6) && defined(INET)
1219 			else
1220 #endif
1221 #ifdef INET
1222 				error = tcp_connect(tp,
1223 				    (struct sockaddr *)sinp, td);
1224 #endif
1225 			/*
1226 			 * The bind operation in tcp_connect succeeded. We
1227 			 * no longer want to restore the flags if later
1228 			 * operations fail.
1229 			 */
1230 			if (error == 0 || inp->inp_lport != 0)
1231 				restoreflags = false;
1232 
1233 			if (error)
1234 				goto out;
1235 			tp->snd_wnd = TTCP_CLIENT_SND_WND;
1236 			tcp_mss(tp, -1);
1237 		}
1238 		tp->snd_up = tp->snd_una + sbavail(&so->so_snd);
1239 		if (!(flags & PRUS_NOTREADY)) {
1240 			tp->t_flags |= TF_FORCEDATA;
1241 			error = tp->t_fb->tfb_tcp_output(tp);
1242 			tp->t_flags &= ~TF_FORCEDATA;
1243 		}
1244 	}
1245 	TCP_LOG_EVENT(tp, NULL,
1246 	    &inp->inp_socket->so_rcv,
1247 	    &inp->inp_socket->so_snd,
1248 	    TCP_LOG_USERSEND, error,
1249 	    0, NULL, false);
1250 out:
1251 	/*
1252 	 * If the request was unsuccessful and we changed flags,
1253 	 * restore the original flags.
1254 	 */
1255 	if (error != 0 && restoreflags) {
1256 		inp->inp_vflag = vflagsav;
1257 		inp->inp_inc.inc_flags = incflagsav;
1258 	}
1259 	TCPDEBUG2((flags & PRUS_OOB) ? PRU_SENDOOB :
1260 		  ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
1261 	TCP_PROBE2(debug__user, tp, (flags & PRUS_OOB) ? PRU_SENDOOB :
1262 		   ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
1263 	INP_WUNLOCK(inp);
1264 	NET_EPOCH_EXIT(et);
1265 	return (error);
1266 }
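
/*
 * The PRUS_OOB branch above is what send(2) with MSG_OOB reaches: the data
 * is appended to the send buffer, snd_up is set just past it, and output is
 * forced with TF_FORCEDATA.  Note that the receiver keeps only a single
 * byte of urgent data (t_iobc, consumed by tcp_usr_rcvoob() below).
 * Userland sketch, illustration only:
 *
 *	send(s, "normal data", 11, 0);
 *	send(s, "!", 1, MSG_OOB);
 */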
1267 
1268 static int
1269 tcp_usr_ready(struct socket *so, struct mbuf *m, int count)
1270 {
1271 	struct epoch_tracker et;
1272 	struct inpcb *inp;
1273 	struct tcpcb *tp;
1274 	int error;
1275 
1276 	inp = sotoinpcb(so);
1277 	INP_WLOCK(inp);
1278 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1279 		INP_WUNLOCK(inp);
1280 		mb_free_notready(m, count);
1281 		return (ECONNRESET);
1282 	}
1283 	tp = intotcpcb(inp);
1284 
1285 	SOCKBUF_LOCK(&so->so_snd);
1286 	error = sbready(&so->so_snd, m, count);
1287 	SOCKBUF_UNLOCK(&so->so_snd);
1288 	if (error == 0) {
1289 		NET_EPOCH_ENTER(et);
1290 		error = tp->t_fb->tfb_tcp_output(tp);
1291 		NET_EPOCH_EXIT(et);
1292 	}
1293 	INP_WUNLOCK(inp);
1294 
1295 	return (error);
1296 }
1297 
1298 /*
1299  * Abort the TCP.  Drop the connection abruptly.
1300  */
1301 static void
1302 tcp_usr_abort(struct socket *so)
1303 {
1304 	struct inpcb *inp;
1305 	struct tcpcb *tp = NULL;
1306 	struct epoch_tracker et;
1307 	TCPDEBUG0;
1308 
1309 	inp = sotoinpcb(so);
1310 	KASSERT(inp != NULL, ("tcp_usr_abort: inp == NULL"));
1311 
1312 	NET_EPOCH_ENTER(et);
1313 	INP_WLOCK(inp);
1314 	KASSERT(inp->inp_socket != NULL,
1315 	    ("tcp_usr_abort: inp_socket == NULL"));
1316 
1317 	/*
1318 	 * If we still have full TCP state, and we're not dropped, drop.
1319 	 */
1320 	if (!(inp->inp_flags & INP_TIMEWAIT) &&
1321 	    !(inp->inp_flags & INP_DROPPED)) {
1322 		tp = intotcpcb(inp);
1323 		TCPDEBUG1();
1324 		tp = tcp_drop(tp, ECONNABORTED);
1325 		if (tp == NULL)
1326 			goto dropped;
1327 		TCPDEBUG2(PRU_ABORT);
1328 		TCP_PROBE2(debug__user, tp, PRU_ABORT);
1329 	}
1330 	if (!(inp->inp_flags & INP_DROPPED)) {
1331 		SOCK_LOCK(so);
1332 		so->so_state |= SS_PROTOREF;
1333 		SOCK_UNLOCK(so);
1334 		inp->inp_flags |= INP_SOCKREF;
1335 	}
1336 	INP_WUNLOCK(inp);
1337 dropped:
1338 	NET_EPOCH_EXIT(et);
1339 }
1340 
1341 /*
1342  * TCP socket is closed.  Start friendly disconnect.
1343  */
1344 static void
1345 tcp_usr_close(struct socket *so)
1346 {
1347 	struct inpcb *inp;
1348 	struct tcpcb *tp = NULL;
1349 	struct epoch_tracker et;
1350 	TCPDEBUG0;
1351 
1352 	inp = sotoinpcb(so);
1353 	KASSERT(inp != NULL, ("tcp_usr_close: inp == NULL"));
1354 
1355 	NET_EPOCH_ENTER(et);
1356 	INP_WLOCK(inp);
1357 	KASSERT(inp->inp_socket != NULL,
1358 	    ("tcp_usr_close: inp_socket == NULL"));
1359 
1360 	/*
1361 	 * If we still have full TCP state, and we're not dropped, initiate
1362 	 * a disconnect.
1363 	 */
1364 	if (!(inp->inp_flags & INP_TIMEWAIT) &&
1365 	    !(inp->inp_flags & INP_DROPPED)) {
1366 		tp = intotcpcb(inp);
1367 		TCPDEBUG1();
1368 		tcp_disconnect(tp);
1369 		TCPDEBUG2(PRU_CLOSE);
1370 		TCP_PROBE2(debug__user, tp, PRU_CLOSE);
1371 	}
1372 	if (!(inp->inp_flags & INP_DROPPED)) {
1373 		SOCK_LOCK(so);
1374 		so->so_state |= SS_PROTOREF;
1375 		SOCK_UNLOCK(so);
1376 		inp->inp_flags |= INP_SOCKREF;
1377 	}
1378 	INP_WUNLOCK(inp);
1379 	NET_EPOCH_EXIT(et);
1380 }
1381 
1382 static int
1383 tcp_pru_options_support(struct tcpcb *tp, int flags)
1384 {
1385 	/*
1386 	 * If the specific TCP stack has a pru_options
1387 	 * specified then it does not always support
1388 	 * all the PRU_XX options and we must ask it.
1389 	 * If the function is not specified then all
1390 	 * of the PRU_XX options are supported.
1391 	 */
1392 	int ret = 0;
1393 
1394 	if (tp->t_fb->tfb_pru_options) {
1395 		ret = (*tp->t_fb->tfb_pru_options)(tp, flags);
1396 	}
1397 	return (ret);
1398 }
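
/*
 * A hypothetical stack that does not implement urgent data could supply a
 * tfb_pru_options handler along these lines (sketch only, not an existing
 * function in this tree):
 *
 *	static int
 *	example_pru_options(struct tcpcb *tp, int flags)
 *	{
 *		if (flags & PRUS_OOB)
 *			return (EOPNOTSUPP);
 *		return (0);
 *	}
 *
 * tcp_usr_send() and tcp_usr_rcvoob() then fail early instead of queueing
 * urgent data the stack cannot deliver.
 */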
1399 
1400 /*
1401  * Receive out-of-band data.
1402  */
1403 static int
1404 tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags)
1405 {
1406 	int error = 0;
1407 	struct inpcb *inp;
1408 	struct tcpcb *tp = NULL;
1409 
1410 	TCPDEBUG0;
1411 	inp = sotoinpcb(so);
1412 	KASSERT(inp != NULL, ("tcp_usr_rcvoob: inp == NULL"));
1413 	INP_WLOCK(inp);
1414 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1415 		error = ECONNRESET;
1416 		goto out;
1417 	}
1418 	tp = intotcpcb(inp);
1419 	error = tcp_pru_options_support(tp, PRUS_OOB);
1420 	if (error) {
1421 		goto out;
1422 	}
1423 	TCPDEBUG1();
1424 	if ((so->so_oobmark == 0 &&
1425 	     (so->so_rcv.sb_state & SBS_RCVATMARK) == 0) ||
1426 	    so->so_options & SO_OOBINLINE ||
1427 	    tp->t_oobflags & TCPOOB_HADDATA) {
1428 		error = EINVAL;
1429 		goto out;
1430 	}
1431 	if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
1432 		error = EWOULDBLOCK;
1433 		goto out;
1434 	}
1435 	m->m_len = 1;
1436 	*mtod(m, caddr_t) = tp->t_iobc;
1437 	if ((flags & MSG_PEEK) == 0)
1438 		tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
1439 
1440 out:
1441 	TCPDEBUG2(PRU_RCVOOB);
1442 	TCP_PROBE2(debug__user, tp, PRU_RCVOOB);
1443 	INP_WUNLOCK(inp);
1444 	return (error);
1445 }
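
/*
 * Userland counterpart of the out-of-band receive above (illustration
 * only): with SO_OOBINLINE off, the single urgent byte saved in t_iobc is
 * fetched with MSG_OOB; with SO_OOBINLINE on, this path returns EINVAL and
 * the byte is read from the normal data stream instead.
 * handle_urgent_byte() is a placeholder for application logic.
 *
 *	char c;
 *
 *	if (recv(s, &c, 1, MSG_OOB) == 1)
 *		handle_urgent_byte(c);
 */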
1446 
1447 #ifdef INET
1448 struct pr_usrreqs tcp_usrreqs = {
1449 	.pru_abort =		tcp_usr_abort,
1450 	.pru_accept =		tcp_usr_accept,
1451 	.pru_attach =		tcp_usr_attach,
1452 	.pru_bind =		tcp_usr_bind,
1453 	.pru_connect =		tcp_usr_connect,
1454 	.pru_control =		in_control,
1455 	.pru_detach =		tcp_usr_detach,
1456 	.pru_disconnect =	tcp_usr_disconnect,
1457 	.pru_listen =		tcp_usr_listen,
1458 	.pru_peeraddr =		in_getpeeraddr,
1459 	.pru_rcvd =		tcp_usr_rcvd,
1460 	.pru_rcvoob =		tcp_usr_rcvoob,
1461 	.pru_send =		tcp_usr_send,
1462 	.pru_ready =		tcp_usr_ready,
1463 	.pru_shutdown =		tcp_usr_shutdown,
1464 	.pru_sockaddr =		in_getsockaddr,
1465 	.pru_sosetlabel =	in_pcbsosetlabel,
1466 	.pru_close =		tcp_usr_close,
1467 };
1468 #endif /* INET */
1469 
1470 #ifdef INET6
1471 struct pr_usrreqs tcp6_usrreqs = {
1472 	.pru_abort =		tcp_usr_abort,
1473 	.pru_accept =		tcp6_usr_accept,
1474 	.pru_attach =		tcp_usr_attach,
1475 	.pru_bind =		tcp6_usr_bind,
1476 	.pru_connect =		tcp6_usr_connect,
1477 	.pru_control =		in6_control,
1478 	.pru_detach =		tcp_usr_detach,
1479 	.pru_disconnect =	tcp_usr_disconnect,
1480 	.pru_listen =		tcp6_usr_listen,
1481 	.pru_peeraddr =		in6_mapped_peeraddr,
1482 	.pru_rcvd =		tcp_usr_rcvd,
1483 	.pru_rcvoob =		tcp_usr_rcvoob,
1484 	.pru_send =		tcp_usr_send,
1485 	.pru_ready =		tcp_usr_ready,
1486 	.pru_shutdown =		tcp_usr_shutdown,
1487 	.pru_sockaddr =		in6_mapped_sockaddr,
1488 	.pru_sosetlabel =	in_pcbsosetlabel,
1489 	.pru_close =		tcp_usr_close,
1490 };
1491 #endif /* INET6 */
1492 
1493 #ifdef INET
1494 /*
1495  * Common subroutine to open a TCP connection to remote host specified
1496  * by the struct sockaddr_in pointed to by nam.  Call in_pcbbind to assign a local
1497  * port number if needed.  Call in_pcbconnect_setup to do the routing and
1498  * to choose a local host address (interface).  If there is an existing
1499  * incarnation of the same connection in TIME-WAIT state and if the remote
1500  * host was sending CC options and if the connection duration was < MSL, then
1501  * truncate the previous TIME-WAIT state and proceed.
1502  * Initialize connection parameters and enter SYN-SENT state.
1503  */
1504 static int
1505 tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
1506 {
1507 	struct inpcb *inp = tp->t_inpcb, *oinp;
1508 	struct socket *so = inp->inp_socket;
1509 	struct in_addr laddr;
1510 	u_short lport;
1511 	int error;
1512 
1513 	NET_EPOCH_ASSERT();
1514 	INP_WLOCK_ASSERT(inp);
1515 	INP_HASH_WLOCK(&V_tcbinfo);
1516 
1517 	if (inp->inp_lport == 0) {
1518 		error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
1519 		if (error)
1520 			goto out;
1521 	}
1522 
1523 	/*
1524 	 * Cannot simply call in_pcbconnect, because there might be an
1525 	 * earlier incarnation of this same connection still in
1526 	 * TIME_WAIT state, creating an ADDRINUSE error.
1527 	 */
1528 	laddr = inp->inp_laddr;
1529 	lport = inp->inp_lport;
1530 	error = in_pcbconnect_setup(inp, nam, &laddr.s_addr, &lport,
1531 	    &inp->inp_faddr.s_addr, &inp->inp_fport, &oinp, td->td_ucred);
1532 	if (error && oinp == NULL)
1533 		goto out;
1534 	if (oinp) {
1535 		error = EADDRINUSE;
1536 		goto out;
1537 	}
1538 	inp->inp_laddr = laddr;
1539 	in_pcbrehash(inp);
1540 	INP_HASH_WUNLOCK(&V_tcbinfo);
1541 
1542 	/*
1543 	 * Compute window scaling to request:
1544 	 * Scale to fit into sweet spot.  See tcp_syncache.c.
1545 	 * XXX: This should move to tcp_output().
1546 	 */
1547 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1548 	    (TCP_MAXWIN << tp->request_r_scale) < sb_max)
1549 		tp->request_r_scale++;
1550 
1551 	soisconnecting(so);
1552 	TCPSTAT_INC(tcps_connattempt);
1553 	tcp_state_change(tp, TCPS_SYN_SENT);
1554 	tp->iss = tcp_new_isn(&inp->inp_inc);
1555 	if (tp->t_flags & TF_REQ_TSTMP)
1556 		tp->ts_offset = tcp_new_ts_offset(&inp->inp_inc);
1557 	tcp_sendseqinit(tp);
1558 
1559 	return 0;
1560 
1561 out:
1562 	INP_HASH_WUNLOCK(&V_tcbinfo);
1563 	return (error);
1564 }
1565 #endif /* INET */
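
/*
 * Worked example of the request_r_scale loop in tcp_connect() above,
 * assuming the default kern.ipc.maxsockbuf (sb_max) of 2 MB: TCP_MAXWIN is
 * 65535, and 65535 << 5 = 2097120 is still below 2097152 while 65535 << 6
 * is not, so the loop stops with request_r_scale == 6.  A larger sb_max
 * raises the requested shift, capped at TCP_MAX_WINSHIFT (14).
 */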
1566 
1567 #ifdef INET6
1568 static int
1569 tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
1570 {
1571 	struct inpcb *inp = tp->t_inpcb;
1572 	int error;
1573 
1574 	INP_WLOCK_ASSERT(inp);
1575 	INP_HASH_WLOCK(&V_tcbinfo);
1576 
1577 	if (inp->inp_lport == 0) {
1578 		error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
1579 		if (error)
1580 			goto out;
1581 	}
1582 	error = in6_pcbconnect(inp, nam, td->td_ucred);
1583 	if (error != 0)
1584 		goto out;
1585 	INP_HASH_WUNLOCK(&V_tcbinfo);
1586 
1587 	/* Compute window scaling to request.  */
1588 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1589 	    (TCP_MAXWIN << tp->request_r_scale) < sb_max)
1590 		tp->request_r_scale++;
1591 
1592 	soisconnecting(inp->inp_socket);
1593 	TCPSTAT_INC(tcps_connattempt);
1594 	tcp_state_change(tp, TCPS_SYN_SENT);
1595 	tp->iss = tcp_new_isn(&inp->inp_inc);
1596 	if (tp->t_flags & TF_REQ_TSTMP)
1597 		tp->ts_offset = tcp_new_ts_offset(&inp->inp_inc);
1598 	tcp_sendseqinit(tp);
1599 
1600 	return 0;
1601 
1602 out:
1603 	INP_HASH_WUNLOCK(&V_tcbinfo);
1604 	return error;
1605 }
1606 #endif /* INET6 */
1607 
1608 /*
1609  * Export TCP internal state information via a struct tcp_info, based on the
1610  * Linux 2.6 API.  Not ABI compatible as our constants are mapped differently
1611  * (TCP state machine, etc).  We export all information using FreeBSD-native
1612  * constants -- for example, the numeric values for tcpi_state will differ
1613  * from Linux.
1614  */
1615 static void
1616 tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
1617 {
1618 
1619 	INP_WLOCK_ASSERT(tp->t_inpcb);
1620 	bzero(ti, sizeof(*ti));
1621 
1622 	ti->tcpi_state = tp->t_state;
1623 	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
1624 		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1625 	if (tp->t_flags & TF_SACK_PERMIT)
1626 		ti->tcpi_options |= TCPI_OPT_SACK;
1627 	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
1628 		ti->tcpi_options |= TCPI_OPT_WSCALE;
1629 		ti->tcpi_snd_wscale = tp->snd_scale;
1630 		ti->tcpi_rcv_wscale = tp->rcv_scale;
1631 	}
1632 	if (tp->t_flags2 & TF2_ECN_PERMIT)
1633 		ti->tcpi_options |= TCPI_OPT_ECN;
1634 
1635 	ti->tcpi_rto = tp->t_rxtcur * tick;
1636 	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
1637 	ti->tcpi_rtt = ((u_int64_t)tp->t_srtt * tick) >> TCP_RTT_SHIFT;
1638 	ti->tcpi_rttvar = ((u_int64_t)tp->t_rttvar * tick) >> TCP_RTTVAR_SHIFT;
1639 
1640 	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
1641 	ti->tcpi_snd_cwnd = tp->snd_cwnd;
1642 
1643 	/*
1644 	 * FreeBSD-specific extension fields for tcp_info.
1645 	 */
1646 	ti->tcpi_rcv_space = tp->rcv_wnd;
1647 	ti->tcpi_rcv_nxt = tp->rcv_nxt;
1648 	ti->tcpi_snd_wnd = tp->snd_wnd;
1649 	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
1650 	ti->tcpi_snd_nxt = tp->snd_nxt;
1651 	ti->tcpi_snd_mss = tp->t_maxseg;
1652 	ti->tcpi_rcv_mss = tp->t_maxseg;
1653 	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
1654 	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
1655 	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
1656 #ifdef TCP_OFFLOAD
1657 	if (tp->t_flags & TF_TOE) {
1658 		ti->tcpi_options |= TCPI_OPT_TOE;
1659 		tcp_offload_tcp_info(tp, ti);
1660 	}
1661 #endif
1662 }
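
/*
 * Userland sketch of consuming the structure filled in above (illustration
 * only; remember the values use FreeBSD constants, e.g. tcpi_state is a
 * TCPS_* value rather than the Linux numbering):
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("state %d, rtt %u us, cwnd %u bytes\n",
 *		    (int)ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */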
1663 
1664 /*
1665  * tcp_ctloutput() must drop the inpcb lock before performing copyin on
1666  * socket option arguments.  When it re-acquires the lock after the copy, it
1667  * has to revalidate that the connection is still valid for the socket
1668  * option.
1669  */
1670 #define INP_WLOCK_RECHECK_CLEANUP(inp, cleanup) do {			\
1671 	INP_WLOCK(inp);							\
1672 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {		\
1673 		INP_WUNLOCK(inp);					\
1674 		cleanup;						\
1675 		return (ECONNRESET);					\
1676 	}								\
1677 	tp = intotcpcb(inp);						\
1678 } while(0)
1679 #define INP_WLOCK_RECHECK(inp) INP_WLOCK_RECHECK_CLEANUP((inp), /* noop */)
1680 
1681 int
1682 tcp_ctloutput(struct socket *so, struct sockopt *sopt)
1683 {
1684 	int	error;
1685 	struct	inpcb *inp;
1686 	struct	tcpcb *tp;
1687 	struct tcp_function_block *blk;
1688 	struct tcp_function_set fsn;
1689 
1690 	error = 0;
1691 	inp = sotoinpcb(so);
1692 	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
1693 	if (sopt->sopt_level != IPPROTO_TCP) {
1694 #ifdef INET6
1695 		if (inp->inp_vflag & INP_IPV6PROTO) {
1696 			error = ip6_ctloutput(so, sopt);
1697 			/*
1698 			 * In case of the IPV6_USE_MIN_MTU socket option,
1699 			 * the INC_IPV6MINMTU flag is set to announce a corresponding
1700 			 * MSS during the initial handshake.
1701 			 * If the TCP connection is not in the front states,
1702 			 * just reduce the MSS being used.
1703 			 * This avoids the sending of TCP segments which will
1704 			 * be fragmented at the IPv6 layer.
1705 			 */
1706 			if ((error == 0) &&
1707 			    (sopt->sopt_dir == SOPT_SET) &&
1708 			    (sopt->sopt_level == IPPROTO_IPV6) &&
1709 			    (sopt->sopt_name == IPV6_USE_MIN_MTU)) {
1710 				INP_WLOCK(inp);
1711 				if ((inp->inp_flags &
1712 				    (INP_TIMEWAIT | INP_DROPPED))) {
1713 					INP_WUNLOCK(inp);
1714 					return (ECONNRESET);
1715 				}
1716 				inp->inp_inc.inc_flags |= INC_IPV6MINMTU;
1717 				tp = intotcpcb(inp);
1718 				if ((tp->t_state >= TCPS_SYN_SENT) &&
1719 				    (inp->inp_inc.inc_flags & INC_ISIPV6)) {
1720 					struct ip6_pktopts *opt;
1721 
1722 					opt = inp->in6p_outputopts;
1723 					if ((opt != NULL) &&
1724 					    (opt->ip6po_minmtu ==
1725 					    IP6PO_MINMTU_ALL)) {
1726 						if (tp->t_maxseg > TCP6_MSS) {
1727 							tp->t_maxseg = TCP6_MSS;
1728 						}
1729 					}
1730 				}
1731 				INP_WUNLOCK(inp);
1732 			}
1733 		}
1734 #endif /* INET6 */
1735 #if defined(INET6) && defined(INET)
1736 		else
1737 #endif
1738 #ifdef INET
1739 		{
1740 			error = ip_ctloutput(so, sopt);
1741 		}
1742 #endif
1743 		return (error);
1744 	}
1745 	INP_WLOCK(inp);
1746 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1747 		INP_WUNLOCK(inp);
1748 		return (ECONNRESET);
1749 	}
1750 	tp = intotcpcb(inp);
1751 	/*
1752 	 * Protect the TCP option TCP_FUNCTION_BLK so
1753 	 * that a sub-function can *never* overwrite this.
1754 	 */
1755 	if ((sopt->sopt_dir == SOPT_SET) &&
1756 	    (sopt->sopt_name == TCP_FUNCTION_BLK)) {
1757 		INP_WUNLOCK(inp);
1758 		error = sooptcopyin(sopt, &fsn, sizeof fsn,
1759 		    sizeof fsn);
1760 		if (error)
1761 			return (error);
1762 		INP_WLOCK_RECHECK(inp);
1763 		blk = find_and_ref_tcp_functions(&fsn);
1764 		if (blk == NULL) {
1765 			INP_WUNLOCK(inp);
1766 			return (ENOENT);
1767 		}
1768 		if (tp->t_fb == blk) {
1769 			/* You already have this */
1770 			refcount_release(&blk->tfb_refcnt);
1771 			INP_WUNLOCK(inp);
1772 			return (0);
1773 		}
1774 		if (tp->t_state != TCPS_CLOSED) {
1775 			/*
1776 			 * The user has advanced the state
1777 			 * past the initial point; we may not
1778 			 * be able to switch.
1779 			 */
1780 			if (blk->tfb_tcp_handoff_ok != NULL) {
1781 				/*
1782 				 * Does the stack provide a query
1783 				 * mechanism?  If so, switching may
1784 				 * still be possible.
1785 				 */
1786 				error = (*blk->tfb_tcp_handoff_ok)(tp);
1787 			} else
1788 				error = EINVAL;
1789 			if (error) {
1790 				refcount_release(&blk->tfb_refcnt);
1791 				INP_WUNLOCK(inp);
1792 				return (error);
1793 			}
1794 		}
1795 		if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
1796 			refcount_release(&blk->tfb_refcnt);
1797 			INP_WUNLOCK(inp);
1798 			return (ENOENT);
1799 		}
1800 		/*
1801 		 * Release the old refcnt; the
1802 		 * lookup already acquired a ref
1803 		 * on the new one.
1804 		 */
1805 		if (tp->t_fb->tfb_tcp_fb_fini) {
1806 			/*
1807 			 * Tell the stack to clean up with 0, i.e.
1808 			 * the tcb is not going away.
1809 			 */
1810 			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
1811 		}
1812 #ifdef TCPHPTS
1813 		/* Ensure that we are not on any hpts. */
1814 		tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_ALL);
1815 #endif
1816 		if (blk->tfb_tcp_fb_init) {
1817 			error = (*blk->tfb_tcp_fb_init)(tp);
1818 			if (error) {
1819 				refcount_release(&blk->tfb_refcnt);
1820 				if (tp->t_fb->tfb_tcp_fb_init) {
1821 					if ((*tp->t_fb->tfb_tcp_fb_init)(tp) != 0) {
1822 						/* Fall back failed, drop the connection */
1823 						INP_WUNLOCK(inp);
1824 						soabort(so);
1825 						return (error);
1826 					}
1827 				}
1828 				goto err_out;
1829 			}
1830 		}
1831 		refcount_release(&tp->t_fb->tfb_refcnt);
1832 		tp->t_fb = blk;
1833 #ifdef TCP_OFFLOAD
1834 		if (tp->t_flags & TF_TOE) {
1835 			tcp_offload_ctloutput(tp, sopt->sopt_dir,
1836 			     sopt->sopt_name);
1837 		}
1838 #endif
1839 err_out:
1840 		INP_WUNLOCK(inp);
1841 		return (error);
1842 	} else if ((sopt->sopt_dir == SOPT_GET) &&
1843 	    (sopt->sopt_name == TCP_FUNCTION_BLK)) {
1844 		strncpy(fsn.function_set_name, tp->t_fb->tfb_tcp_block_name,
1845 		    TCP_FUNCTION_NAME_LEN_MAX);
1846 		fsn.function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
1847 		fsn.pcbcnt = tp->t_fb->tfb_refcnt;
1848 		INP_WUNLOCK(inp);
1849 		error = sooptcopyout(sopt, &fsn, sizeof fsn);
1850 		return (error);
1851 	}
1852 	/* Pass in the INP locked; the callee must unlock it. */
1853 	return (tp->t_fb->tfb_tcp_ctloutput(so, sopt, inp, tp));
1854 }
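
/*
 * Userland sketch of the TCP_FUNCTION_BLK handling above (illustration
 * only; assumes an alternate stack module, e.g. tcp_rack, is loaded and
 * registered under the name used here):
 *
 *	struct tcp_function_set fsn;
 *
 *	memset(&fsn, 0, sizeof(fsn));
 *	strlcpy(fsn.function_set_name, "rack", sizeof(fsn.function_set_name));
 *	if (setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &fsn,
 *	    sizeof(fsn)) == -1)
 *		err(1, "TCP_FUNCTION_BLK");
 *
 * The matching getsockopt() returns the current block name and its pcb
 * reference count in the same structure, as coded in the SOPT_GET branch
 * above.
 */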
1855 
1856 /*
1857  * If this assert becomes untrue, we need to change the size of the buf
1858  * variable in tcp_default_ctloutput().
1859  */
1860 #ifdef CTASSERT
1861 CTASSERT(TCP_CA_NAME_MAX <= TCP_LOG_ID_LEN);
1862 CTASSERT(TCP_LOG_REASON_LEN <= TCP_LOG_ID_LEN);
1863 #endif
1864 
1865 #ifdef KERN_TLS
1866 static int
1867 copyin_tls_enable(struct sockopt *sopt, struct tls_enable *tls)
1868 {
1869 	struct tls_enable_v0 tls_v0;
1870 	int error;
1871 
1872 	if (sopt->sopt_valsize == sizeof(tls_v0)) {
1873 		error = sooptcopyin(sopt, &tls_v0, sizeof(tls_v0),
1874 		    sizeof(tls_v0));
1875 		if (error)
1876 			return (error);
1877 		memset(tls, 0, sizeof(*tls));
1878 		tls->cipher_key = tls_v0.cipher_key;
1879 		tls->iv = tls_v0.iv;
1880 		tls->auth_key = tls_v0.auth_key;
1881 		tls->cipher_algorithm = tls_v0.cipher_algorithm;
1882 		tls->cipher_key_len = tls_v0.cipher_key_len;
1883 		tls->iv_len = tls_v0.iv_len;
1884 		tls->auth_algorithm = tls_v0.auth_algorithm;
1885 		tls->auth_key_len = tls_v0.auth_key_len;
1886 		tls->flags = tls_v0.flags;
1887 		tls->tls_vmajor = tls_v0.tls_vmajor;
1888 		tls->tls_vminor = tls_v0.tls_vminor;
1889 		return (0);
1890 	}
1891 
1892 	return (sooptcopyin(sopt, tls, sizeof(*tls), sizeof(*tls)));
1893 }
1894 #endif
1895 
1896 int
1897 tcp_default_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
1898 {
1899 	int	error, opt, optval;
1900 	u_int	ui;
1901 	struct	tcp_info ti;
1902 #ifdef KERN_TLS
1903 	struct tls_enable tls;
1904 #endif
1905 	struct cc_algo *algo;
1906 	char	*pbuf, buf[TCP_LOG_ID_LEN];
1907 #ifdef STATS
1908 	struct statsblob *sbp;
1909 #endif
1910 	size_t	len;
1911 
1912 	/*
1913 	 * For TCP_CCALGOOPT forward the control to CC module, for both
1914 	 * SOPT_SET and SOPT_GET.
1915 	 */
1916 	switch (sopt->sopt_name) {
1917 	case TCP_CCALGOOPT:
1918 		INP_WUNLOCK(inp);
1919 		if (sopt->sopt_valsize > CC_ALGOOPT_LIMIT)
1920 			return (EINVAL);
1921 		pbuf = malloc(sopt->sopt_valsize, M_TEMP, M_WAITOK | M_ZERO);
1922 		error = sooptcopyin(sopt, pbuf, sopt->sopt_valsize,
1923 		    sopt->sopt_valsize);
1924 		if (error) {
1925 			free(pbuf, M_TEMP);
1926 			return (error);
1927 		}
1928 		INP_WLOCK_RECHECK_CLEANUP(inp, free(pbuf, M_TEMP));
1929 		if (CC_ALGO(tp)->ctl_output != NULL)
1930 			error = CC_ALGO(tp)->ctl_output(tp->ccv, sopt, pbuf);
1931 		else
1932 			error = ENOENT;
1933 		INP_WUNLOCK(inp);
1934 		if (error == 0 && sopt->sopt_dir == SOPT_GET)
1935 			error = sooptcopyout(sopt, pbuf, sopt->sopt_valsize);
1936 		free(pbuf, M_TEMP);
1937 		return (error);
1938 	}
1939 
1940 	switch (sopt->sopt_dir) {
1941 	case SOPT_SET:
1942 		switch (sopt->sopt_name) {
1943 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1944 		case TCP_MD5SIG:
1945 			if (!TCPMD5_ENABLED()) {
1946 				INP_WUNLOCK(inp);
1947 				return (ENOPROTOOPT);
1948 			}
1949 			error = TCPMD5_PCBCTL(inp, sopt);
1950 			if (error)
1951 				return (error);
1952 			goto unlock_and_done;
1953 #endif /* IPSEC */
1954 
1955 		case TCP_NODELAY:
1956 		case TCP_NOOPT:
1957 			INP_WUNLOCK(inp);
1958 			error = sooptcopyin(sopt, &optval, sizeof optval,
1959 			    sizeof optval);
1960 			if (error)
1961 				return (error);
1962 
1963 			INP_WLOCK_RECHECK(inp);
1964 			switch (sopt->sopt_name) {
1965 			case TCP_NODELAY:
1966 				opt = TF_NODELAY;
1967 				break;
1968 			case TCP_NOOPT:
1969 				opt = TF_NOOPT;
1970 				break;
1971 			default:
1972 				opt = 0; /* dead code to fool gcc */
1973 				break;
1974 			}
1975 
1976 			if (optval)
1977 				tp->t_flags |= opt;
1978 			else
1979 				tp->t_flags &= ~opt;
1980 unlock_and_done:
1981 #ifdef TCP_OFFLOAD
1982 			if (tp->t_flags & TF_TOE) {
1983 				tcp_offload_ctloutput(tp, sopt->sopt_dir,
1984 				    sopt->sopt_name);
1985 			}
1986 #endif
1987 			INP_WUNLOCK(inp);
1988 			break;
1989 
1990 		case TCP_NOPUSH:
1991 			INP_WUNLOCK(inp);
1992 			error = sooptcopyin(sopt, &optval, sizeof optval,
1993 			    sizeof optval);
1994 			if (error)
1995 				return (error);
1996 
1997 			INP_WLOCK_RECHECK(inp);
1998 			if (optval)
1999 				tp->t_flags |= TF_NOPUSH;
2000 			else if (tp->t_flags & TF_NOPUSH) {
2001 				tp->t_flags &= ~TF_NOPUSH;
2002 				if (TCPS_HAVEESTABLISHED(tp->t_state)) {
2003 					struct epoch_tracker et;
2004 
2005 					NET_EPOCH_ENTER(et);
2006 					error = tp->t_fb->tfb_tcp_output(tp);
2007 					NET_EPOCH_EXIT(et);
2008 				}
2009 			}
2010 			goto unlock_and_done;
2011 
2012 		case TCP_MAXSEG:
2013 			INP_WUNLOCK(inp);
2014 			error = sooptcopyin(sopt, &optval, sizeof optval,
2015 			    sizeof optval);
2016 			if (error)
2017 				return (error);
2018 
2019 			INP_WLOCK_RECHECK(inp);
2020 			if (optval > 0 && optval <= tp->t_maxseg &&
2021 			    optval + 40 >= V_tcp_minmss)
2022 				tp->t_maxseg = optval;
2023 			else
2024 				error = EINVAL;
2025 			goto unlock_and_done;
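		/*
		 * Illustrative userland sketch (not part of this file): the
		 * check above means TCP_MAXSEG can only lower the effective
		 * maximum segment size, never raise it, and the new value
		 * must still leave room for a sane MSS.  A caller capping
		 * the MSS might do (1200 is just an example value):
		 *
		 *	#include <netinet/in.h>
		 *	#include <netinet/tcp.h>
		 *	#include <sys/socket.h>
		 *	#include <err.h>
		 *
		 *	int mss = 1200;
		 *
		 *	if (setsockopt(s, IPPROTO_TCP, TCP_MAXSEG, &mss,
		 *	    sizeof(mss)) == -1)
		 *		warn("TCP_MAXSEG");
		 */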
2026 
2027 		case TCP_INFO:
2028 			INP_WUNLOCK(inp);
2029 			error = EINVAL;
2030 			break;
2031 
2032 		case TCP_STATS:
2033 			INP_WUNLOCK(inp);
2034 #ifdef STATS
2035 			error = sooptcopyin(sopt, &optval, sizeof optval,
2036 			    sizeof optval);
2037 			if (error)
2038 				return (error);
2039 
2040 			if (optval > 0)
2041 				sbp = stats_blob_alloc(
2042 				    V_tcp_perconn_stats_dflt_tpl, 0);
2043 			else
2044 				sbp = NULL;
2045 
2046 			INP_WLOCK_RECHECK(inp);
2047 			if ((tp->t_stats != NULL && sbp == NULL) ||
2048 			    (tp->t_stats == NULL && sbp != NULL)) {
2049 				struct statsblob *t = tp->t_stats;
2050 				tp->t_stats = sbp;
2051 				sbp = t;
2052 			}
2053 			INP_WUNLOCK(inp);
2054 
2055 			stats_blob_destroy(sbp);
2056 #else
2057 			return (EOPNOTSUPP);
2058 #endif /* !STATS */
2059 			break;
2060 
2061 		case TCP_CONGESTION:
2062 			INP_WUNLOCK(inp);
2063 			error = sooptcopyin(sopt, buf, TCP_CA_NAME_MAX - 1, 1);
2064 			if (error)
2065 				break;
2066 			buf[sopt->sopt_valsize] = '\0';
2067 			INP_WLOCK_RECHECK(inp);
2068 			CC_LIST_RLOCK();
2069 			STAILQ_FOREACH(algo, &cc_list, entries)
2070 				if (strncmp(buf, algo->name,
2071 				    TCP_CA_NAME_MAX) == 0)
2072 					break;
2073 			CC_LIST_RUNLOCK();
2074 			if (algo == NULL) {
2075 				INP_WUNLOCK(inp);
2076 				error = EINVAL;
2077 				break;
2078 			}
2079 			/*
2080 			 * We hold a write lock over the tcb so it's safe to
2081 			 * do these things without ordering concerns.
2082 			 */
2083 			if (CC_ALGO(tp)->cb_destroy != NULL)
2084 				CC_ALGO(tp)->cb_destroy(tp->ccv);
2085 			CC_DATA(tp) = NULL;
2086 			CC_ALGO(tp) = algo;
2087 			/*
2088 			 * If something goes pear shaped initialising the new
2089 			 * algo, fall back to newreno (which does not
2090 			 * require initialisation).
2091 			 */
2092 			if (algo->cb_init != NULL &&
2093 			    algo->cb_init(tp->ccv) != 0) {
2094 				CC_ALGO(tp) = &newreno_cc_algo;
2095 				/*
2096 				 * The only reason init should fail is
2097 				 * because of malloc.
2098 				 */
2099 				error = ENOMEM;
2100 			}
2101 			INP_WUNLOCK(inp);
2102 			break;
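		/*
		 * Illustrative userland sketch (not part of this file):
		 * selecting a congestion control module by name.  The module
		 * is looked up in cc_list above, so it must already be
		 * loaded; "cubic" is only an example name (cc_cubic.ko).
		 * The current name can be read back with getsockopt().
		 *
		 *	#include <netinet/in.h>
		 *	#include <netinet/tcp.h>
		 *	#include <sys/socket.h>
		 *	#include <string.h>
		 *	#include <err.h>
		 *
		 *	const char cc[] = "cubic";
		 *
		 *	if (setsockopt(s, IPPROTO_TCP, TCP_CONGESTION, cc,
		 *	    strlen(cc)) == -1)
		 *		err(1, "TCP_CONGESTION");
		 */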
2103 
2104 #ifdef KERN_TLS
2105 		case TCP_TXTLS_ENABLE:
2106 			INP_WUNLOCK(inp);
2107 			error = copyin_tls_enable(sopt, &tls);
2108 			if (error)
2109 				break;
2110 			error = ktls_enable_tx(so, &tls);
2111 			break;
2112 		case TCP_TXTLS_MODE:
2113 			INP_WUNLOCK(inp);
2114 			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
2115 			if (error)
2116 				return (error);
2117 
2118 			INP_WLOCK_RECHECK(inp);
2119 			error = ktls_set_tx_mode(so, ui);
2120 			INP_WUNLOCK(inp);
2121 			break;
2122 		case TCP_RXTLS_ENABLE:
2123 			INP_WUNLOCK(inp);
2124 			error = sooptcopyin(sopt, &tls, sizeof(tls),
2125 			    sizeof(tls));
2126 			if (error)
2127 				break;
2128 			error = ktls_enable_rx(so, &tls);
2129 			break;
2130 #endif
2131 
2132 		case TCP_KEEPIDLE:
2133 		case TCP_KEEPINTVL:
2134 		case TCP_KEEPINIT:
2135 			INP_WUNLOCK(inp);
2136 			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
2137 			if (error)
2138 				return (error);
2139 
2140 			if (ui > (UINT_MAX / hz)) {
2141 				error = EINVAL;
2142 				break;
2143 			}
2144 			ui *= hz;
2145 
2146 			INP_WLOCK_RECHECK(inp);
2147 			switch (sopt->sopt_name) {
2148 			case TCP_KEEPIDLE:
2149 				tp->t_keepidle = ui;
2150 				/*
2151 				 * XXX: better check current remaining
2152 				 * timeout and "merge" it with new value.
2153 				 */
2154 				if ((tp->t_state > TCPS_LISTEN) &&
2155 				    (tp->t_state <= TCPS_CLOSING))
2156 					tcp_timer_activate(tp, TT_KEEP,
2157 					    TP_KEEPIDLE(tp));
2158 				break;
2159 			case TCP_KEEPINTVL:
2160 				tp->t_keepintvl = ui;
2161 				if ((tp->t_state == TCPS_FIN_WAIT_2) &&
2162 				    (TP_MAXIDLE(tp) > 0))
2163 					tcp_timer_activate(tp, TT_2MSL,
2164 					    TP_MAXIDLE(tp));
2165 				break;
2166 			case TCP_KEEPINIT:
2167 				tp->t_keepinit = ui;
2168 				if (tp->t_state == TCPS_SYN_RECEIVED ||
2169 				    tp->t_state == TCPS_SYN_SENT)
2170 					tcp_timer_activate(tp, TT_KEEP,
2171 					    TP_KEEPINIT(tp));
2172 				break;
2173 			}
2174 			goto unlock_and_done;
2175 
2176 		case TCP_KEEPCNT:
2177 			INP_WUNLOCK(inp);
2178 			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
2179 			if (error)
2180 				return (error);
2181 
2182 			INP_WLOCK_RECHECK(inp);
2183 			tp->t_keepcnt = ui;
2184 			if ((tp->t_state == TCPS_FIN_WAIT_2) &&
2185 			    (TP_MAXIDLE(tp) > 0))
2186 				tcp_timer_activate(tp, TT_2MSL,
2187 				    TP_MAXIDLE(tp));
2188 			goto unlock_and_done;
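		/*
		 * Illustrative userland sketch (not part of this file): the
		 * keep-alive options above take values in seconds (converted
		 * to ticks with hz) and only take effect once SO_KEEPALIVE is
		 * enabled on the socket.  The values below are examples only.
		 *
		 *	#include <sys/types.h>
		 *	#include <sys/socket.h>
		 *	#include <netinet/in.h>
		 *	#include <netinet/tcp.h>
		 *
		 *	int on = 1;
		 *	u_int idle = 60, intvl = 10, cnt = 5;
		 *
		 *	setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
		 *	setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &idle,
		 *	    sizeof(idle));
		 *	setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &intvl,
		 *	    sizeof(intvl));
		 *	setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &cnt,
		 *	    sizeof(cnt));
		 */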
2189 
2190 #ifdef TCPPCAP
2191 		case TCP_PCAP_OUT:
2192 		case TCP_PCAP_IN:
2193 			INP_WUNLOCK(inp);
2194 			error = sooptcopyin(sopt, &optval, sizeof optval,
2195 			    sizeof optval);
2196 			if (error)
2197 				return (error);
2198 
2199 			INP_WLOCK_RECHECK(inp);
2200 			if (optval >= 0)
2201 				tcp_pcap_set_sock_max(
2202 				    (sopt->sopt_name == TCP_PCAP_OUT) ?
2203 				    &(tp->t_outpkts) : &(tp->t_inpkts), optval);
2204 			else
2205 				error = EINVAL;
2206 			goto unlock_and_done;
2207 #endif
2208 
2209 		case TCP_FASTOPEN: {
2210 			struct tcp_fastopen tfo_optval;
2211 
2212 			INP_WUNLOCK(inp);
2213 			if (!V_tcp_fastopen_client_enable &&
2214 			    !V_tcp_fastopen_server_enable)
2215 				return (EPERM);
2216 
2217 			error = sooptcopyin(sopt, &tfo_optval,
2218 				    sizeof(tfo_optval), sizeof(int));
2219 			if (error)
2220 				return (error);
2221 
2222 			INP_WLOCK_RECHECK(inp);
2223 			if (tfo_optval.enable) {
2224 				if (tp->t_state == TCPS_LISTEN) {
2225 					if (!V_tcp_fastopen_server_enable) {
2226 						error = EPERM;
2227 						goto unlock_and_done;
2228 					}
2229 
2230 					tp->t_flags |= TF_FASTOPEN;
2231 					if (tp->t_tfo_pending == NULL)
2232 						tp->t_tfo_pending =
2233 						    tcp_fastopen_alloc_counter();
2234 				} else {
2235 					/*
2236 					 * If a pre-shared key was provided,
2237 					 * stash it in the client cookie
2238 					 * field of the tcpcb for use during
2239 					 * connect.
2240 					 */
2241 					if (sopt->sopt_valsize ==
2242 					    sizeof(tfo_optval)) {
2243 						memcpy(tp->t_tfo_cookie.client,
2244 						       tfo_optval.psk,
2245 						       TCP_FASTOPEN_PSK_LEN);
2246 						tp->t_tfo_client_cookie_len =
2247 						    TCP_FASTOPEN_PSK_LEN;
2248 					}
2249 					tp->t_flags |= TF_FASTOPEN;
2250 				}
2251 			} else
2252 				tp->t_flags &= ~TF_FASTOPEN;
2253 			goto unlock_and_done;
2254 		}
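		/*
		 * Illustrative userland sketch (not part of this file):
		 * enabling TCP Fast Open on a listening socket.  As the code
		 * above shows, the server-side enable requires the socket to
		 * already be in the LISTEN state and the server-side fastopen
		 * sysctl to be non-zero, otherwise EPERM is returned; a
		 * client may instead pass a full struct tcp_fastopen carrying
		 * a pre-shared key.
		 *
		 *	#include <netinet/in.h>
		 *	#include <netinet/tcp.h>
		 *	#include <sys/socket.h>
		 *	#include <err.h>
		 *
		 *	int on = 1;
		 *
		 *	if (listen(s, 128) == -1)
		 *		err(1, "listen");
		 *	if (setsockopt(s, IPPROTO_TCP, TCP_FASTOPEN, &on,
		 *	    sizeof(on)) == -1)
		 *		err(1, "TCP_FASTOPEN");
		 */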
2255 
2256 #ifdef TCP_BLACKBOX
2257 		case TCP_LOG:
2258 			INP_WUNLOCK(inp);
2259 			error = sooptcopyin(sopt, &optval, sizeof optval,
2260 			    sizeof optval);
2261 			if (error)
2262 				return (error);
2263 
2264 			INP_WLOCK_RECHECK(inp);
2265 			error = tcp_log_state_change(tp, optval);
2266 			goto unlock_and_done;
2267 
2268 		case TCP_LOGBUF:
2269 			INP_WUNLOCK(inp);
2270 			error = EINVAL;
2271 			break;
2272 
2273 		case TCP_LOGID:
2274 			INP_WUNLOCK(inp);
2275 			error = sooptcopyin(sopt, buf, TCP_LOG_ID_LEN - 1, 0);
2276 			if (error)
2277 				break;
2278 			buf[sopt->sopt_valsize] = '\0';
2279 			INP_WLOCK_RECHECK(inp);
2280 			error = tcp_log_set_id(tp, buf);
2281 			/* tcp_log_set_id() unlocks the INP. */
2282 			break;
2283 
2284 		case TCP_LOGDUMP:
2285 		case TCP_LOGDUMPID:
2286 			INP_WUNLOCK(inp);
2287 			error =
2288 			    sooptcopyin(sopt, buf, TCP_LOG_REASON_LEN - 1, 0);
2289 			if (error)
2290 				break;
2291 			buf[sopt->sopt_valsize] = '\0';
2292 			INP_WLOCK_RECHECK(inp);
2293 			if (sopt->sopt_name == TCP_LOGDUMP) {
2294 				error = tcp_log_dump_tp_logbuf(tp, buf,
2295 				    M_WAITOK, true);
2296 				INP_WUNLOCK(inp);
2297 			} else {
2298 				tcp_log_dump_tp_bucket_logbufs(tp, buf);
2299 				/*
2300 				 * tcp_log_dump_tp_bucket_logbufs() drops the
2301 				 * INP lock.
2302 				 */
2303 			}
2304 			break;
2305 #endif
2306 
2307 		default:
2308 			INP_WUNLOCK(inp);
2309 			error = ENOPROTOOPT;
2310 			break;
2311 		}
2312 		break;
2313 
2314 	case SOPT_GET:
2315 		tp = intotcpcb(inp);
2316 		switch (sopt->sopt_name) {
2317 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
2318 		case TCP_MD5SIG:
2319 			if (!TCPMD5_ENABLED()) {
2320 				INP_WUNLOCK(inp);
2321 				return (ENOPROTOOPT);
2322 			}
2323 			error = TCPMD5_PCBCTL(inp, sopt);
2324 			break;
2325 #endif
2326 
2327 		case TCP_NODELAY:
2328 			optval = tp->t_flags & TF_NODELAY;
2329 			INP_WUNLOCK(inp);
2330 			error = sooptcopyout(sopt, &optval, sizeof optval);
2331 			break;
2332 		case TCP_MAXSEG:
2333 			optval = tp->t_maxseg;
2334 			INP_WUNLOCK(inp);
2335 			error = sooptcopyout(sopt, &optval, sizeof optval);
2336 			break;
2337 		case TCP_NOOPT:
2338 			optval = tp->t_flags & TF_NOOPT;
2339 			INP_WUNLOCK(inp);
2340 			error = sooptcopyout(sopt, &optval, sizeof optval);
2341 			break;
2342 		case TCP_NOPUSH:
2343 			optval = tp->t_flags & TF_NOPUSH;
2344 			INP_WUNLOCK(inp);
2345 			error = sooptcopyout(sopt, &optval, sizeof optval);
2346 			break;
2347 		case TCP_INFO:
2348 			tcp_fill_info(tp, &ti);
2349 			INP_WUNLOCK(inp);
2350 			error = sooptcopyout(sopt, &ti, sizeof ti);
2351 			break;
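		/*
		 * Illustrative userland sketch (not part of this file):
		 * TCP_INFO is read-only (the SOPT_SET case returns EINVAL)
		 * and copies out the snapshot filled in by tcp_fill_info()
		 * above.  The two fields printed here are just examples; see
		 * struct tcp_info in netinet/tcp.h for the full set.
		 *
		 *	#include <netinet/in.h>
		 *	#include <netinet/tcp.h>
		 *	#include <sys/socket.h>
		 *	#include <stdio.h>
		 *
		 *	struct tcp_info ti;
		 *	socklen_t len = sizeof(ti);
		 *
		 *	if (getsockopt(s, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		 *		printf("rtt %u us, cwnd %u bytes\n",
		 *		    ti.tcpi_rtt, ti.tcpi_snd_cwnd);
		 */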
2352 		case TCP_STATS:
2353 			{
2354 #ifdef STATS
2355 			int nheld;
2356 			TYPEOF_MEMBER(struct statsblob, flags) sbflags = 0;
2357 
2358 			error = 0;
2359 			socklen_t outsbsz = sopt->sopt_valsize;
2360 			if (tp->t_stats == NULL)
2361 				error = ENOENT;
2362 			else if (outsbsz >= tp->t_stats->cursz)
2363 				outsbsz = tp->t_stats->cursz;
2364 			else if (outsbsz >= sizeof(struct statsblob))
2365 				outsbsz = sizeof(struct statsblob);
2366 			else
2367 				error = EINVAL;
2368 			INP_WUNLOCK(inp);
2369 			if (error)
2370 				break;
2371 
2372 			sbp = sopt->sopt_val;
2373 			nheld = atop(round_page(((vm_offset_t)sbp) +
2374 			    (vm_size_t)outsbsz) - trunc_page((vm_offset_t)sbp));
2375 			vm_page_t ma[nheld];
2376 			if (vm_fault_quick_hold_pages(
2377 			    &curproc->p_vmspace->vm_map, (vm_offset_t)sbp,
2378 			    outsbsz, VM_PROT_READ | VM_PROT_WRITE, ma,
2379 			    nheld) < 0) {
2380 				error = EFAULT;
2381 				break;
2382 			}
2383 
2384 			if ((error = copyin_nofault(&(sbp->flags), &sbflags,
2385 			    SIZEOF_MEMBER(struct statsblob, flags))))
2386 				goto unhold;
2387 
2388 			INP_WLOCK_RECHECK(inp);
2389 			error = stats_blob_snapshot(&sbp, outsbsz, tp->t_stats,
2390 			    sbflags | SB_CLONE_USRDSTNOFAULT);
2391 			INP_WUNLOCK(inp);
2392 			sopt->sopt_valsize = outsbsz;
2393 unhold:
2394 			vm_page_unhold_pages(ma, nheld);
2395 #else
2396 			INP_WUNLOCK(inp);
2397 			error = EOPNOTSUPP;
2398 #endif /* !STATS */
2399 			break;
2400 			}
2401 		case TCP_CONGESTION:
2402 			len = strlcpy(buf, CC_ALGO(tp)->name, TCP_CA_NAME_MAX);
2403 			INP_WUNLOCK(inp);
2404 			error = sooptcopyout(sopt, buf, len + 1);
2405 			break;
2406 		case TCP_KEEPIDLE:
2407 		case TCP_KEEPINTVL:
2408 		case TCP_KEEPINIT:
2409 		case TCP_KEEPCNT:
2410 			switch (sopt->sopt_name) {
2411 			case TCP_KEEPIDLE:
2412 				ui = TP_KEEPIDLE(tp) / hz;
2413 				break;
2414 			case TCP_KEEPINTVL:
2415 				ui = TP_KEEPINTVL(tp) / hz;
2416 				break;
2417 			case TCP_KEEPINIT:
2418 				ui = TP_KEEPINIT(tp) / hz;
2419 				break;
2420 			case TCP_KEEPCNT:
2421 				ui = TP_KEEPCNT(tp);
2422 				break;
2423 			}
2424 			INP_WUNLOCK(inp);
2425 			error = sooptcopyout(sopt, &ui, sizeof(ui));
2426 			break;
2427 #ifdef TCPPCAP
2428 		case TCP_PCAP_OUT:
2429 		case TCP_PCAP_IN:
2430 			optval = tcp_pcap_get_sock_max((sopt->sopt_name ==
2431 			    TCP_PCAP_OUT) ? &(tp->t_outpkts) : &(tp->t_inpkts));
2432 			INP_WUNLOCK(inp);
2433 			error = sooptcopyout(sopt, &optval, sizeof optval);
2434 			break;
2435 #endif
2436 		case TCP_FASTOPEN:
2437 			optval = tp->t_flags & TF_FASTOPEN;
2438 			INP_WUNLOCK(inp);
2439 			error = sooptcopyout(sopt, &optval, sizeof optval);
2440 			break;
2441 #ifdef TCP_BLACKBOX
2442 		case TCP_LOG:
2443 			optval = tp->t_logstate;
2444 			INP_WUNLOCK(inp);
2445 			error = sooptcopyout(sopt, &optval, sizeof(optval));
2446 			break;
2447 		case TCP_LOGBUF:
2448 			/* tcp_log_getlogbuf() does INP_WUNLOCK(inp) */
2449 			error = tcp_log_getlogbuf(sopt, tp);
2450 			break;
2451 		case TCP_LOGID:
2452 			len = tcp_log_get_id(tp, buf);
2453 			INP_WUNLOCK(inp);
2454 			error = sooptcopyout(sopt, buf, len + 1);
2455 			break;
2456 		case TCP_LOGDUMP:
2457 		case TCP_LOGDUMPID:
2458 			INP_WUNLOCK(inp);
2459 			error = EINVAL;
2460 			break;
2461 #endif
2462 #ifdef KERN_TLS
2463 		case TCP_TXTLS_MODE:
2464 			optval = ktls_get_tx_mode(so);
2465 			INP_WUNLOCK(inp);
2466 			error = sooptcopyout(sopt, &optval, sizeof(optval));
2467 			break;
2468 		case TCP_RXTLS_MODE:
2469 			optval = ktls_get_rx_mode(so);
2470 			INP_WUNLOCK(inp);
2471 			error = sooptcopyout(sopt, &optval, sizeof(optval));
2472 			break;
2473 #endif
2474 		default:
2475 			INP_WUNLOCK(inp);
2476 			error = ENOPROTOOPT;
2477 			break;
2478 		}
2479 		break;
2480 	}
2481 	return (error);
2482 }
2483 #undef INP_WLOCK_RECHECK
2484 #undef INP_WLOCK_RECHECK_CLEANUP
2485 
2486 /*
2487  * Initiate (or continue) disconnect.
2488  * If the connection is still embryonic, just close it.
2489  * If SO_LINGER is set with a zero linger time, just drop it (abort).
2490  * Otherwise (a hard close), mark the socket disconnecting, drop any
2491  * pending received data, switch states based on the user close, and
2492  * send a segment (with FIN) to the peer.
2493  */
2494 static void
2495 tcp_disconnect(struct tcpcb *tp)
2496 {
2497 	struct inpcb *inp = tp->t_inpcb;
2498 	struct socket *so = inp->inp_socket;
2499 
2500 	NET_EPOCH_ASSERT();
2501 	INP_WLOCK_ASSERT(inp);
2502 
2503 	/*
2504 	 * Neither tcp_close() nor tcp_drop() should return NULL, as the
2505 	 * socket is still open.
2506 	 */
2507 	if (tp->t_state < TCPS_ESTABLISHED &&
2508 	    !(tp->t_state > TCPS_LISTEN && IS_FASTOPEN(tp->t_flags))) {
2509 		tp = tcp_close(tp);
2510 		KASSERT(tp != NULL,
2511 		    ("tcp_disconnect: tcp_close() returned NULL"));
2512 	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
2513 		tp = tcp_drop(tp, 0);
2514 		KASSERT(tp != NULL,
2515 		    ("tcp_disconnect: tcp_drop() returned NULL"));
2516 	} else {
2517 		soisdisconnecting(so);
2518 		sbflush(&so->so_rcv);
2519 		tcp_usrclosed(tp);
2520 		if (!(inp->inp_flags & INP_DROPPED))
2521 			tp->t_fb->tfb_tcp_output(tp);
2522 	}
2523 }
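/*
 * Illustrative userland sketch (not part of this file): the tcp_drop() arm
 * above (SO_LINGER set with a zero linger time) is the path an application
 * takes when it wants close() to abort the connection with a RST rather
 * than linger through the normal FIN handshake:
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);
 */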
2524 
2525 /*
2526  * The user issued a close and wishes to trail through the shutdown
2527  * states: if we never received a SYN, just forget it.  If we got a SYN
2528  * from the peer but have not yet sent a FIN, go to FIN_WAIT_1 to send
2529  * the peer a FIN.  If we already got a FIN from the peer, we are almost
2530  * done; go to the LAST_ACK state.  In all other cases we have already
2531  * sent a FIN to the peer (e.g. after PRU_SHUTDOWN) and must wait for the
2532  * peer to send a FIN or stop responding to keep-alives, etc.
2533  * We can let the user exit from the close as soon as the FIN is acked.
2534  */
2535 static void
2536 tcp_usrclosed(struct tcpcb *tp)
2537 {
2538 
2539 	NET_EPOCH_ASSERT();
2540 	INP_WLOCK_ASSERT(tp->t_inpcb);
2541 
2542 	switch (tp->t_state) {
2543 	case TCPS_LISTEN:
2544 #ifdef TCP_OFFLOAD
2545 		tcp_offload_listen_stop(tp);
2546 #endif
2547 		tcp_state_change(tp, TCPS_CLOSED);
2548 		/* FALLTHROUGH */
2549 	case TCPS_CLOSED:
2550 		tp = tcp_close(tp);
2551 		/*
2552 		 * tcp_close() should never return NULL here as the socket is
2553 		 * still open.
2554 		 */
2555 		KASSERT(tp != NULL,
2556 		    ("tcp_usrclosed: tcp_close() returned NULL"));
2557 		break;
2558 
2559 	case TCPS_SYN_SENT:
2560 	case TCPS_SYN_RECEIVED:
2561 		tp->t_flags |= TF_NEEDFIN;
2562 		break;
2563 
2564 	case TCPS_ESTABLISHED:
2565 		tcp_state_change(tp, TCPS_FIN_WAIT_1);
2566 		break;
2567 
2568 	case TCPS_CLOSE_WAIT:
2569 		tcp_state_change(tp, TCPS_LAST_ACK);
2570 		break;
2571 	}
2572 	if (tp->t_state >= TCPS_FIN_WAIT_2) {
2573 		soisdisconnected(tp->t_inpcb->inp_socket);
2574 		/* Prevent the connection hanging in FIN_WAIT_2 forever. */
2575 		if (tp->t_state == TCPS_FIN_WAIT_2) {
2576 			int timeout;
2577 
2578 			timeout = (tcp_fast_finwait2_recycle) ?
2579 			    tcp_finwait2_timeout : TP_MAXIDLE(tp);
2580 			tcp_timer_activate(tp, TT_2MSL, timeout);
2581 		}
2582 	}
2583 }
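/*
 * Illustrative userland sketch (not part of this file): the ESTABLISHED ->
 * FIN_WAIT_1 transition above is what a half-close from userland exercises.
 * shutdown(SHUT_WR) queues the FIN but leaves the receive side open, so the
 * caller can keep reading until the peer closes in turn:
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	char buf[2048];
 *	ssize_t n;
 *
 *	shutdown(s, SHUT_WR);
 *	while ((n = read(s, buf, sizeof(buf))) > 0)
 *		;
 *	close(s);
 */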
2584 
2585 #ifdef DDB
2586 static void
2587 db_print_indent(int indent)
2588 {
2589 	int i;
2590 
2591 	for (i = 0; i < indent; i++)
2592 		db_printf(" ");
2593 }
2594 
2595 static void
2596 db_print_tstate(int t_state)
2597 {
2598 
2599 	switch (t_state) {
2600 	case TCPS_CLOSED:
2601 		db_printf("TCPS_CLOSED");
2602 		return;
2603 
2604 	case TCPS_LISTEN:
2605 		db_printf("TCPS_LISTEN");
2606 		return;
2607 
2608 	case TCPS_SYN_SENT:
2609 		db_printf("TCPS_SYN_SENT");
2610 		return;
2611 
2612 	case TCPS_SYN_RECEIVED:
2613 		db_printf("TCPS_SYN_RECEIVED");
2614 		return;
2615 
2616 	case TCPS_ESTABLISHED:
2617 		db_printf("TCPS_ESTABLISHED");
2618 		return;
2619 
2620 	case TCPS_CLOSE_WAIT:
2621 		db_printf("TCPS_CLOSE_WAIT");
2622 		return;
2623 
2624 	case TCPS_FIN_WAIT_1:
2625 		db_printf("TCPS_FIN_WAIT_1");
2626 		return;
2627 
2628 	case TCPS_CLOSING:
2629 		db_printf("TCPS_CLOSING");
2630 		return;
2631 
2632 	case TCPS_LAST_ACK:
2633 		db_printf("TCPS_LAST_ACK");
2634 		return;
2635 
2636 	case TCPS_FIN_WAIT_2:
2637 		db_printf("TCPS_FIN_WAIT_2");
2638 		return;
2639 
2640 	case TCPS_TIME_WAIT:
2641 		db_printf("TCPS_TIME_WAIT");
2642 		return;
2643 
2644 	default:
2645 		db_printf("unknown");
2646 		return;
2647 	}
2648 }
2649 
2650 static void
2651 db_print_tflags(u_int t_flags)
2652 {
2653 	int comma;
2654 
2655 	comma = 0;
2656 	if (t_flags & TF_ACKNOW) {
2657 		db_printf("%sTF_ACKNOW", comma ? ", " : "");
2658 		comma = 1;
2659 	}
2660 	if (t_flags & TF_DELACK) {
2661 		db_printf("%sTF_DELACK", comma ? ", " : "");
2662 		comma = 1;
2663 	}
2664 	if (t_flags & TF_NODELAY) {
2665 		db_printf("%sTF_NODELAY", comma ? ", " : "");
2666 		comma = 1;
2667 	}
2668 	if (t_flags & TF_NOOPT) {
2669 		db_printf("%sTF_NOOPT", comma ? ", " : "");
2670 		comma = 1;
2671 	}
2672 	if (t_flags & TF_SENTFIN) {
2673 		db_printf("%sTF_SENTFIN", comma ? ", " : "");
2674 		comma = 1;
2675 	}
2676 	if (t_flags & TF_REQ_SCALE) {
2677 		db_printf("%sTF_REQ_SCALE", comma ? ", " : "");
2678 		comma = 1;
2679 	}
2680 	if (t_flags & TF_RCVD_SCALE) {
2681 		db_printf("%sTF_RCVD_SCALE", comma ? ", " : "");
2682 		comma = 1;
2683 	}
2684 	if (t_flags & TF_REQ_TSTMP) {
2685 		db_printf("%sTF_REQ_TSTMP", comma ? ", " : "");
2686 		comma = 1;
2687 	}
2688 	if (t_flags & TF_RCVD_TSTMP) {
2689 		db_printf("%sTF_RCVD_TSTMP", comma ? ", " : "");
2690 		comma = 1;
2691 	}
2692 	if (t_flags & TF_SACK_PERMIT) {
2693 		db_printf("%sTF_SACK_PERMIT", comma ? ", " : "");
2694 		comma = 1;
2695 	}
2696 	if (t_flags & TF_NEEDSYN) {
2697 		db_printf("%sTF_NEEDSYN", comma ? ", " : "");
2698 		comma = 1;
2699 	}
2700 	if (t_flags & TF_NEEDFIN) {
2701 		db_printf("%sTF_NEEDFIN", comma ? ", " : "");
2702 		comma = 1;
2703 	}
2704 	if (t_flags & TF_NOPUSH) {
2705 		db_printf("%sTF_NOPUSH", comma ? ", " : "");
2706 		comma = 1;
2707 	}
2708 	if (t_flags & TF_MORETOCOME) {
2709 		db_printf("%sTF_MORETOCOME", comma ? ", " : "");
2710 		comma = 1;
2711 	}
2712 	if (t_flags & TF_LQ_OVERFLOW) {
2713 		db_printf("%sTF_LQ_OVERFLOW", comma ? ", " : "");
2714 		comma = 1;
2715 	}
2716 	if (t_flags & TF_LASTIDLE) {
2717 		db_printf("%sTF_LASTIDLE", comma ? ", " : "");
2718 		comma = 1;
2719 	}
2720 	if (t_flags & TF_RXWIN0SENT) {
2721 		db_printf("%sTF_RXWIN0SENT", comma ? ", " : "");
2722 		comma = 1;
2723 	}
2724 	if (t_flags & TF_FASTRECOVERY) {
2725 		db_printf("%sTF_FASTRECOVERY", comma ? ", " : "");
2726 		comma = 1;
2727 	}
2728 	if (t_flags & TF_CONGRECOVERY) {
2729 		db_printf("%sTF_CONGRECOVERY", comma ? ", " : "");
2730 		comma = 1;
2731 	}
2732 	if (t_flags & TF_WASFRECOVERY) {
2733 		db_printf("%sTF_WASFRECOVERY", comma ? ", " : "");
2734 		comma = 1;
2735 	}
2736 	if (t_flags & TF_SIGNATURE) {
2737 		db_printf("%sTF_SIGNATURE", comma ? ", " : "");
2738 		comma = 1;
2739 	}
2740 	if (t_flags & TF_FORCEDATA) {
2741 		db_printf("%sTF_FORCEDATA", comma ? ", " : "");
2742 		comma = 1;
2743 	}
2744 	if (t_flags & TF_TSO) {
2745 		db_printf("%sTF_TSO", comma ? ", " : "");
2746 		comma = 1;
2747 	}
2748 	if (t_flags & TF_FASTOPEN) {
2749 		db_printf("%sTF_FASTOPEN", comma ? ", " : "");
2750 		comma = 1;
2751 	}
2752 }
2753 
2754 static void
2755 db_print_tflags2(u_int t_flags2)
2756 {
2757 	int comma;
2758 
2759 	comma = 0;
2760 	if (t_flags2 & TF2_ECN_PERMIT) {
2761 		db_printf("%sTF2_ECN_PERMIT", comma ? ", " : "");
2762 		comma = 1;
2763 	}
2764 }
2765 
2767 static void
2768 db_print_toobflags(char t_oobflags)
2769 {
2770 	int comma;
2771 
2772 	comma = 0;
2773 	if (t_oobflags & TCPOOB_HAVEDATA) {
2774 		db_printf("%sTCPOOB_HAVEDATA", comma ? ", " : "");
2775 		comma = 1;
2776 	}
2777 	if (t_oobflags & TCPOOB_HADDATA) {
2778 		db_printf("%sTCPOOB_HADDATA", comma ? ", " : "");
2779 		comma = 1;
2780 	}
2781 }
2782 
2783 static void
2784 db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
2785 {
2786 
2787 	db_print_indent(indent);
2788 	db_printf("%s at %p\n", name, tp);
2789 
2790 	indent += 2;
2791 
2792 	db_print_indent(indent);
2793 	db_printf("t_segq first: %p   t_segqlen: %d   t_dupacks: %d\n",
2794 	   TAILQ_FIRST(&tp->t_segq), tp->t_segqlen, tp->t_dupacks);
2795 
2796 	db_print_indent(indent);
2797 	db_printf("tt_rexmt: %p   tt_persist: %p   tt_keep: %p\n",
2798 	    &tp->t_timers->tt_rexmt, &tp->t_timers->tt_persist, &tp->t_timers->tt_keep);
2799 
2800 	db_print_indent(indent);
2801 	db_printf("tt_2msl: %p   tt_delack: %p   t_inpcb: %p\n", &tp->t_timers->tt_2msl,
2802 	    &tp->t_timers->tt_delack, tp->t_inpcb);
2803 
2804 	db_print_indent(indent);
2805 	db_printf("t_state: %d (", tp->t_state);
2806 	db_print_tstate(tp->t_state);
2807 	db_printf(")\n");
2808 
2809 	db_print_indent(indent);
2810 	db_printf("t_flags: 0x%x (", tp->t_flags);
2811 	db_print_tflags(tp->t_flags);
2812 	db_printf(")\n");
2813 
2814 	db_print_indent(indent);
2815 	db_printf("t_flags2: 0x%x (", tp->t_flags2);
2816 	db_print_tflags2(tp->t_flags2);
2817 	db_printf(")\n");
2818 
2819 	db_print_indent(indent);
2820 	db_printf("snd_una: 0x%08x   snd_max: 0x%08x   snd_nxt: 0x%08x\n",
2821 	    tp->snd_una, tp->snd_max, tp->snd_nxt);
2822 
2823 	db_print_indent(indent);
2824 	db_printf("snd_up: 0x%08x   snd_wl1: 0x%08x   snd_wl2: 0x%08x\n",
2825 	   tp->snd_up, tp->snd_wl1, tp->snd_wl2);
2826 
2827 	db_print_indent(indent);
2828 	db_printf("iss: 0x%08x   irs: 0x%08x   rcv_nxt: 0x%08x\n",
2829 	    tp->iss, tp->irs, tp->rcv_nxt);
2830 
2831 	db_print_indent(indent);
2832 	db_printf("rcv_adv: 0x%08x   rcv_wnd: %u   rcv_up: 0x%08x\n",
2833 	    tp->rcv_adv, tp->rcv_wnd, tp->rcv_up);
2834 
2835 	db_print_indent(indent);
2836 	db_printf("snd_wnd: %u   snd_cwnd: %u\n",
2837 	   tp->snd_wnd, tp->snd_cwnd);
2838 
2839 	db_print_indent(indent);
2840 	db_printf("snd_ssthresh: %u   snd_recover: "
2841 	    "0x%08x\n", tp->snd_ssthresh, tp->snd_recover);
2842 
2843 	db_print_indent(indent);
2844 	db_printf("t_rcvtime: %u   t_starttime: %u\n",
2845 	    tp->t_rcvtime, tp->t_starttime);
2846 
2847 	db_print_indent(indent);
2848 	db_printf("t_rtttime: %u   t_rtseq: 0x%08x\n",
2849 	    tp->t_rtttime, tp->t_rtseq);
2850 
2851 	db_print_indent(indent);
2852 	db_printf("t_rxtcur: %d   t_maxseg: %u   t_srtt: %d\n",
2853 	    tp->t_rxtcur, tp->t_maxseg, tp->t_srtt);
2854 
2855 	db_print_indent(indent);
2856 	db_printf("t_rttvar: %d   t_rxtshift: %d   t_rttmin: %u   "
2857 	    "t_rttbest: %u\n", tp->t_rttvar, tp->t_rxtshift, tp->t_rttmin,
2858 	    tp->t_rttbest);
2859 
2860 	db_print_indent(indent);
2861 	db_printf("t_rttupdated: %lu   max_sndwnd: %u   t_softerror: %d\n",
2862 	    tp->t_rttupdated, tp->max_sndwnd, tp->t_softerror);
2863 
2864 	db_print_indent(indent);
2865 	db_printf("t_oobflags: 0x%x (", tp->t_oobflags);
2866 	db_print_toobflags(tp->t_oobflags);
2867 	db_printf(")   t_iobc: 0x%02x\n", tp->t_iobc);
2868 
2869 	db_print_indent(indent);
2870 	db_printf("snd_scale: %u   rcv_scale: %u   request_r_scale: %u\n",
2871 	    tp->snd_scale, tp->rcv_scale, tp->request_r_scale);
2872 
2873 	db_print_indent(indent);
2874 	db_printf("ts_recent: %u   ts_recent_age: %u\n",
2875 	    tp->ts_recent, tp->ts_recent_age);
2876 
2877 	db_print_indent(indent);
2878 	db_printf("ts_offset: %u   last_ack_sent: 0x%08x   snd_cwnd_prev: "
2879 	    "%u\n", tp->ts_offset, tp->last_ack_sent, tp->snd_cwnd_prev);
2880 
2881 	db_print_indent(indent);
2882 	db_printf("snd_ssthresh_prev: %u   snd_recover_prev: 0x%08x   "
2883 	    "t_badrxtwin: %u\n", tp->snd_ssthresh_prev,
2884 	    tp->snd_recover_prev, tp->t_badrxtwin);
2885 
2886 	db_print_indent(indent);
2887 	db_printf("snd_numholes: %d  snd_holes first: %p\n",
2888 	    tp->snd_numholes, TAILQ_FIRST(&tp->snd_holes));
2889 
2890 	db_print_indent(indent);
2891 	db_printf("snd_fack: 0x%08x   rcv_numsacks: %d\n",
2892 	    tp->snd_fack, tp->rcv_numsacks);
2893 
2894 	/* Skip sackblks, sackhint. */
2895 
2896 	db_print_indent(indent);
2897 	db_printf("t_rttlow: %d   rfbuf_ts: %u   rfbuf_cnt: %d\n",
2898 	    tp->t_rttlow, tp->rfbuf_ts, tp->rfbuf_cnt);
2899 }
2900 
2901 DB_SHOW_COMMAND(tcpcb, db_show_tcpcb)
2902 {
2903 	struct tcpcb *tp;
2904 
2905 	if (!have_addr) {
2906 		db_printf("usage: show tcpcb <addr>\n");
2907 		return;
2908 	}
2909 	tp = (struct tcpcb *)addr;
2910 
2911 	db_print_tcpcb(tp, "tcpcb", 0);
2912 }
2913 #endif
2914