xref: /freebsd/sys/kern/uipc_socket.c (revision 9b37d84c87e69dabc69d818aa4d2fea718bd8b74)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1990, 1993
5  *	The Regents of the University of California.
6  * Copyright (c) 2004 The FreeBSD Foundation
7  * Copyright (c) 2004-2008 Robert N. M. Watson
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * Comments on the socket life cycle:
37  *
38  * soalloc() sets up socket layer state for a socket, called only by
39  * socreate() and sonewconn().  Socket layer private.
40  *
41  * sodealloc() tears down socket layer state for a socket, called only by
42  * sofree() and sonewconn().  Socket layer private.
43  *
44  * pr_attach() associates protocol layer state with an allocated socket;
45  * called only once, may fail, aborting socket allocation.  This is called
46  * from socreate() and sonewconn().  Socket layer private.
47  *
48  * pr_detach() disassociates protocol layer state from an attached socket,
49  * and will be called exactly once for sockets in which pr_attach() has
50  * been successfully called.  If pr_attach() returned an error,
51  * pr_detach() will not be called.  Socket layer private.
52  *
53  * pr_abort() and pr_close() notify the protocol layer that the last
54  * consumer of a socket is starting to tear down the socket, and that the
55  * protocol should terminate the connection.  Historically, pr_abort() also
56  * detached protocol state from the socket state, but this is no longer the
57  * case.
58  *
59  * socreate() creates a socket and attaches protocol state.  This is a public
60  * interface that may be used by socket layer consumers to create new
61  * sockets.
62  *
63  * sonewconn() creates a socket and attaches protocol state.  This is a
64  * public interface that may be used by protocols to create new sockets when
65  * a new connection is received and will be available for accept() on a
66  * listen socket.
67  *
68  * soclose() destroys a socket after possibly waiting for it to disconnect.
69  * This is a public interface that socket consumers should use to close and
70  * release a socket when done with it.
71  *
72  * soabort() destroys a socket without waiting for it to disconnect (used
73  * only for incoming connections that are already partially or fully
74  * connected).  This is used internally by the socket layer when clearing
75  * listen socket queues (due to overflow or close on the listen socket), but
76  * is also a public interface protocols may use to abort connections in
77  * their incomplete listen queues should they no longer be required.  Sockets
78  * placed in completed connection listen queues should not be aborted for
79  * reasons described in the comment above the soclose() implementation.  This
80  * is not a general purpose close routine, and except in the specific
81  * circumstances described here, should not be used.
82  *
83  * sofree() will free a socket and its protocol state if all references on
84  * the socket have been released, and is the interface used to attempt to
85  * free a socket when a reference is removed.  This is a socket layer
86  * private interface.
87  *
88  * NOTE: In addition to socreate() and soclose(), which provide a single
89  * socket reference to the consumer to be managed as required, there are two
90  * calls to explicitly manage socket references: soref() and sorele().
91  * Currently, these are generally required only when transitioning a socket
92  * from a listen queue to a file descriptor, in order to prevent garbage
93  * collection of the socket at an untimely moment.  For a number of reasons,
94  * these interfaces are not preferred, and should be avoided.
95  *
96  * NOTE: With regard to VNETs the general rule is that callers do not set
97  * curvnet. Exceptions to this rule include soabort(), sodisconnect(),
98  * sofree(), sorele(), sonewconn() and sorflush(), which are usually called
99  * from a pre-set VNET context.  sopoll_generic() currently does not need a
100  * VNET context to be set.
101  */
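
/*
 * As a hedged illustration of the public half of this life cycle: a kernel
 * consumer would typically pair socreate() with soclose() along the
 * following lines.  This is a minimal sketch, not a copy of any in-tree
 * consumer, with error handling abbreviated:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    curthread->td_ucred, curthread);
 *	if (error != 0)
 *		return (error);
 *	... use sobind()/soconnect()/sosend()/soreceive() as needed ...
 *	(void)soclose(so);
 *
 * soclose() releases the single reference that socreate() returned.
 */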
102 
103 #include <sys/cdefs.h>
104 #include "opt_inet.h"
105 #include "opt_inet6.h"
106 #include "opt_kern_tls.h"
107 #include "opt_ktrace.h"
108 #include "opt_sctp.h"
109 
110 #include <sys/param.h>
111 #include <sys/systm.h>
112 #include <sys/capsicum.h>
113 #include <sys/fcntl.h>
114 #include <sys/limits.h>
115 #include <sys/lock.h>
116 #include <sys/mac.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/domain.h>
121 #include <sys/file.h>			/* for struct knote */
122 #include <sys/hhook.h>
123 #include <sys/kernel.h>
124 #include <sys/khelp.h>
125 #include <sys/kthread.h>
126 #include <sys/ktls.h>
127 #include <sys/event.h>
128 #include <sys/eventhandler.h>
129 #include <sys/poll.h>
130 #include <sys/proc.h>
131 #include <sys/protosw.h>
132 #include <sys/sbuf.h>
133 #include <sys/socket.h>
134 #include <sys/socketvar.h>
135 #include <sys/resourcevar.h>
136 #include <net/route.h>
137 #include <sys/sched.h>
138 #include <sys/signalvar.h>
139 #include <sys/smp.h>
140 #include <sys/stat.h>
141 #include <sys/sx.h>
142 #include <sys/sysctl.h>
143 #include <sys/taskqueue.h>
144 #include <sys/uio.h>
145 #include <sys/un.h>
146 #include <sys/unpcb.h>
147 #include <sys/jail.h>
148 #include <sys/syslog.h>
149 #include <netinet/in.h>
150 #include <netinet/in_pcb.h>
151 #include <netinet/tcp.h>
152 
153 #include <net/vnet.h>
154 
155 #include <security/mac/mac_framework.h>
157 
158 #include <vm/uma.h>
159 
160 #ifdef COMPAT_FREEBSD32
161 #include <sys/mount.h>
162 #include <sys/sysent.h>
163 #include <compat/freebsd32/freebsd32.h>
164 #endif
165 
166 static int	soreceive_generic_locked(struct socket *so,
167 		    struct sockaddr **psa, struct uio *uio, struct mbuf **mp,
168 		    struct mbuf **controlp, int *flagsp);
169 static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
170 		    int flags);
171 static int	soreceive_stream_locked(struct socket *so, struct sockbuf *sb,
172 		    struct sockaddr **psa, struct uio *uio, struct mbuf **mp,
173 		    struct mbuf **controlp, int flags);
174 static int	sosend_generic_locked(struct socket *so, struct sockaddr *addr,
175 		    struct uio *uio, struct mbuf *top, struct mbuf *control,
176 		    int flags, struct thread *td);
177 static void	so_rdknl_lock(void *);
178 static void	so_rdknl_unlock(void *);
179 static void	so_rdknl_assert_lock(void *, int);
180 static void	so_wrknl_lock(void *);
181 static void	so_wrknl_unlock(void *);
182 static void	so_wrknl_assert_lock(void *, int);
183 
184 static void	filt_sordetach(struct knote *kn);
185 static int	filt_soread(struct knote *kn, long hint);
186 static void	filt_sowdetach(struct knote *kn);
187 static int	filt_sowrite(struct knote *kn, long hint);
188 static int	filt_soempty(struct knote *kn, long hint);
189 
190 static const struct filterops soread_filtops = {
191 	.f_isfd = 1,
192 	.f_detach = filt_sordetach,
193 	.f_event = filt_soread,
194 };
195 static const struct filterops sowrite_filtops = {
196 	.f_isfd = 1,
197 	.f_detach = filt_sowdetach,
198 	.f_event = filt_sowrite,
199 };
200 static const struct filterops soempty_filtops = {
201 	.f_isfd = 1,
202 	.f_detach = filt_sowdetach,
203 	.f_event = filt_soempty,
204 };
205 
206 so_gen_t	so_gencnt;	/* generation count for sockets */
207 
208 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
209 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
210 
211 #define	VNET_SO_ASSERT(so)						\
212 	VNET_ASSERT(curvnet != NULL,					\
213 	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
214 
215 #ifdef SOCKET_HHOOK
216 VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
217 #define	V_socket_hhh		VNET(socket_hhh)
218 static inline int hhook_run_socket(struct socket *, void *, int32_t);
219 #endif
220 
221 #ifdef COMPAT_FREEBSD32
222 #ifdef __amd64__
223 /* off_t has 4-byte alignment on i386 but not on other 32-bit platforms. */
224 #define	__splice32_packed	__packed
225 #else
226 #define	__splice32_packed
227 #endif
228 struct splice32 {
229 	int32_t	sp_fd;
230 	int64_t sp_max;
231 	struct timeval32 sp_idle;
232 } __splice32_packed;
233 #undef __splice32_packed
234 #endif
235 
236 /*
237  * Limit on the number of connections in the listen queue waiting
238  * for accept(2).
239  * NB: The original sysctl somaxconn is still available but hidden
240  * to prevent confusion about the actual purpose of this number.
241  */
242 VNET_DEFINE_STATIC(u_int, somaxconn) = SOMAXCONN;
243 #define	V_somaxconn	VNET(somaxconn)
244 
245 static int
246 sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
247 {
248 	int error;
249 	u_int val;
250 
251 	val = V_somaxconn;
252 	error = sysctl_handle_int(oidp, &val, 0, req);
253 	if (error || !req->newptr)
254 		return (error);
255 
256 	/*
257 	 * The purpose of the UINT_MAX / 3 limit is to ensure that the formula
258 	 *   3 * sol_qlimit / 2
259 	 * below will not overflow.
260 	 */
261 
262 	if (val < 1 || val > UINT_MAX / 3)
263 		return (EINVAL);
264 
265 	V_somaxconn = val;
266 	return (0);
267 }
268 SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue,
269     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE | CTLFLAG_VNET, 0, sizeof(u_int),
270     sysctl_somaxconn, "IU",
271     "Maximum listen socket pending connection accept queue size");
272 SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
273     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE | CTLFLAG_VNET, 0,
274     sizeof(u_int), sysctl_somaxconn, "IU",
275     "Maximum listen socket pending connection accept queue size (compat)");
276 
277 static u_int numopensockets;
278 static int
279 sysctl_numopensockets(SYSCTL_HANDLER_ARGS)
280 {
281 	u_int val;
282 
283 #ifdef VIMAGE
284 	if (!IS_DEFAULT_VNET(curvnet))
285 		val = curvnet->vnet_sockcnt;
286 	else
287 #endif
288 		val = numopensockets;
289 	return (sysctl_handle_int(oidp, &val, 0, req));
290 }
291 SYSCTL_PROC(_kern_ipc, OID_AUTO, numopensockets,
292     CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_VNET, 0, sizeof(u_int),
293     sysctl_numopensockets, "IU", "Number of open sockets");
294 
295 /*
296  * so_global_mtx protects the global so_gencnt counter, numopensockets, and
297  * the per-socket so_gencnt field.
298  */
299 static struct mtx so_global_mtx;
300 MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
301 
302 /*
303  * General IPC sysctl name space, used by sockets and a variety of other IPC
304  * types.
305  */
306 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
307     "IPC");
308 
309 /*
310  * Initialize the socket subsystem and set up the socket
311  * memory allocator.
312  */
313 static uma_zone_t socket_zone;
314 int	maxsockets;
315 
316 static void
317 socket_zone_change(void *tag)
318 {
319 
320 	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
321 }
322 
323 static int splice_init_state;
324 static struct sx splice_init_lock;
325 SX_SYSINIT(splice_init_lock, &splice_init_lock, "splice_init");
326 
327 static SYSCTL_NODE(_kern_ipc, OID_AUTO, splice, CTLFLAG_RW, 0,
328     "Settings relating to the SO_SPLICE socket option");
329 
330 static bool splice_receive_stream = true;
331 SYSCTL_BOOL(_kern_ipc_splice, OID_AUTO, receive_stream, CTLFLAG_RWTUN,
332     &splice_receive_stream, 0,
333     "Use soreceive_stream() for stream splices");
334 
335 static uma_zone_t splice_zone;
336 static struct proc *splice_proc;
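/*
 * Per-CPU work queues feeding the splice worker threads.  Each entry is
 * padded to a cache line to avoid false sharing between CPUs.
 */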
337 struct splice_wq {
338 	struct mtx	mtx;
339 	STAILQ_HEAD(, so_splice) head;
340 	bool		running;
341 } __aligned(CACHE_LINE_SIZE);
342 static struct splice_wq *splice_wq;
343 static uint32_t splice_index = 0;
344 
345 static void so_splice_timeout(void *arg, int pending);
346 static void so_splice_xfer(struct so_splice *s);
347 static int so_unsplice(struct socket *so, bool timeout);
348 
349 static void
350 splice_work_thread(void *ctx)
351 {
352 	struct splice_wq *wq = ctx;
353 	struct so_splice *s, *s_temp;
354 	STAILQ_HEAD(, so_splice) local_head;
355 	int cpu;
356 
357 	cpu = wq - splice_wq;
358 	if (bootverbose)
359 		printf("starting so_splice worker thread for CPU %d\n", cpu);
360 
361 	for (;;) {
362 		mtx_lock(&wq->mtx);
363 		while (STAILQ_EMPTY(&wq->head)) {
364 			wq->running = false;
365 			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
366 			wq->running = true;
367 		}
368 		STAILQ_INIT(&local_head);
369 		STAILQ_CONCAT(&local_head, &wq->head);
370 		STAILQ_INIT(&wq->head);
371 		mtx_unlock(&wq->mtx);
372 		STAILQ_FOREACH_SAFE(s, &local_head, next, s_temp) {
373 			mtx_lock(&s->mtx);
374 			CURVNET_SET(s->src->so_vnet);
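			/*
			 * Note: so_splice_xfer() returns with s->mtx
			 * released on all paths.
			 */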
375 			so_splice_xfer(s);
376 			CURVNET_RESTORE();
377 		}
378 	}
379 }
380 
381 static void
382 so_splice_dispatch_async(struct so_splice *sp)
383 {
384 	struct splice_wq *wq;
385 	bool running;
386 
387 	wq = &splice_wq[sp->wq_index];
388 	mtx_lock(&wq->mtx);
389 	STAILQ_INSERT_TAIL(&wq->head, sp, next);
390 	running = wq->running;
391 	mtx_unlock(&wq->mtx);
392 	if (!running)
393 		wakeup(wq);
394 }
395 
396 void
397 so_splice_dispatch(struct so_splice *sp)
398 {
399 	mtx_assert(&sp->mtx, MA_OWNED);
400 
401 	if (sp->state != SPLICE_IDLE) {
402 		mtx_unlock(&sp->mtx);
403 	} else {
404 		sp->state = SPLICE_QUEUED;
405 		mtx_unlock(&sp->mtx);
406 		so_splice_dispatch_async(sp);
407 	}
408 }
409 
410 static int
411 splice_zinit(void *mem, int size __unused, int flags __unused)
412 {
413 	struct so_splice *s;
414 
415 	s = (struct so_splice *)mem;
416 	mtx_init(&s->mtx, "so_splice", NULL, MTX_DEF);
417 	return (0);
418 }
419 
420 static void
421 splice_zfini(void *mem, int size)
422 {
423 	struct so_splice *s;
424 
425 	s = (struct so_splice *)mem;
426 	mtx_destroy(&s->mtx);
427 }
428 
429 static int
430 splice_init(void)
431 {
432 	struct thread *td;
433 	int error, i, state;
434 
435 	state = atomic_load_acq_int(&splice_init_state);
436 	if (__predict_true(state > 0))
437 		return (0);
438 	if (state < 0)
439 		return (ENXIO);
440 	sx_xlock(&splice_init_lock);
441 	if (splice_init_state != 0) {
442 		sx_xunlock(&splice_init_lock);
443 		return (0);
444 	}
445 
446 	splice_zone = uma_zcreate("splice", sizeof(struct so_splice), NULL,
447 	    NULL, splice_zinit, splice_zfini, UMA_ALIGN_CACHE, 0);
448 
449 	splice_wq = mallocarray(mp_maxid + 1, sizeof(*splice_wq), M_TEMP,
450 	    M_WAITOK | M_ZERO);
451 
452 	/*
453 	 * Initialize the workqueues to run the splice work.  We create a
454 	 * work queue for each CPU.
455 	 */
456 	CPU_FOREACH(i) {
457 		STAILQ_INIT(&splice_wq[i].head);
458 		mtx_init(&splice_wq[i].mtx, "splice work queue", NULL, MTX_DEF);
459 	}
460 
461 	/* Start kthreads for each workqueue. */
462 	error = 0;
463 	CPU_FOREACH(i) {
464 		error = kproc_kthread_add(splice_work_thread, &splice_wq[i],
465 		    &splice_proc, &td, 0, 0, "so_splice", "thr_%d", i);
466 		if (error) {
467 			printf("Can't add so_splice thread %d error %d\n",
468 			    i, error);
469 			break;
470 		}
471 
472 		/*
473 		 * It's possible to create loops with SO_SPLICE; ensure that
474 		 * worker threads aren't able to starve the system too easily.
475 		 */
476 		thread_lock(td);
477 		sched_prio(td, PUSER);
478 		thread_unlock(td);
479 	}
480 
481 	splice_init_state = error != 0 ? -1 : 1;
482 	sx_xunlock(&splice_init_lock);
483 
484 	return (error);
485 }
486 
487 /*
488  * Lock a pair of sockets' I/O locks for splicing.  Avoid blocking while
489  * holding one of the locks, to prevent potential deadlocks in case some
490  * other code path acquires more than one I/O lock at a time.
491  */
492 static void
493 splice_lock_pair(struct socket *so_src, struct socket *so_dst)
494 {
495 	int error;
496 
497 	for (;;) {
498 		error = SOCK_IO_SEND_LOCK(so_dst, SBL_WAIT | SBL_NOINTR);
499 		KASSERT(error == 0,
500 		    ("%s: failed to lock send I/O lock: %d", __func__, error));
501 		error = SOCK_IO_RECV_LOCK(so_src, 0);
502 		KASSERT(error == 0 || error == EWOULDBLOCK,
503 		    ("%s: failed to lock recv I/O lock: %d", __func__, error));
504 		if (error == 0)
505 			break;
506 		SOCK_IO_SEND_UNLOCK(so_dst);
507 
508 		error = SOCK_IO_RECV_LOCK(so_src, SBL_WAIT | SBL_NOINTR);
509 		KASSERT(error == 0,
510 		    ("%s: failed to lock recv I/O lock: %d", __func__, error));
511 		error = SOCK_IO_SEND_LOCK(so_dst, 0);
512 		KASSERT(error == 0 || error == EWOULDBLOCK,
513 		    ("%s: failed to lock send I/O lock: %d", __func__, error));
514 		if (error == 0)
515 			break;
516 		SOCK_IO_RECV_UNLOCK(so_src);
517 	}
518 }
519 
520 static void
521 splice_unlock_pair(struct socket *so_src, struct socket *so_dst)
522 {
523 	SOCK_IO_RECV_UNLOCK(so_src);
524 	SOCK_IO_SEND_UNLOCK(so_dst);
525 }
526 
527 /*
528  * Move data from the source to the sink.  Assumes that both of the relevant
529  * socket I/O locks are held.
530  */
531 static int
532 so_splice_xfer_data(struct socket *so_src, struct socket *so_dst, off_t max,
533     ssize_t *lenp)
534 {
535 	struct uio uio;
536 	struct mbuf *m;
537 	struct sockbuf *sb_src, *sb_dst;
538 	ssize_t len;
539 	long space;
540 	int error, flags;
541 
542 	SOCK_IO_RECV_ASSERT_LOCKED(so_src);
543 	SOCK_IO_SEND_ASSERT_LOCKED(so_dst);
544 
545 	error = 0;
546 	m = NULL;
547 	memset(&uio, 0, sizeof(uio));
548 
549 	sb_src = &so_src->so_rcv;
550 	sb_dst = &so_dst->so_snd;
551 
552 	space = sbspace(sb_dst);
553 	if (space < 0)
554 		space = 0;
555 	len = MIN(max, MIN(space, sbavail(sb_src)));
556 	if (len == 0) {
557 		SOCK_RECVBUF_LOCK(so_src);
558 		if ((sb_src->sb_state & SBS_CANTRCVMORE) != 0)
559 			error = EPIPE;
560 		SOCK_RECVBUF_UNLOCK(so_src);
561 	} else {
562 		flags = MSG_DONTWAIT;
563 		uio.uio_resid = len;
564 		if (splice_receive_stream && sb_src->sb_tls_info == NULL) {
565 			error = soreceive_stream_locked(so_src, sb_src, NULL,
566 			    &uio, &m, NULL, flags);
567 		} else {
568 			error = soreceive_generic_locked(so_src, NULL,
569 			    &uio, &m, NULL, &flags);
570 		}
571 		if (error != 0 && m != NULL) {
572 			m_freem(m);
573 			m = NULL;
574 		}
575 	}
576 	if (m != NULL) {
577 		len -= uio.uio_resid;
578 		error = sosend_generic_locked(so_dst, NULL, NULL, m, NULL,
579 		    MSG_DONTWAIT, curthread);
580 	} else if (error == 0) {
581 		len = 0;
582 		SOCK_SENDBUF_LOCK(so_dst);
583 		if ((sb_dst->sb_state & SBS_CANTSENDMORE) != 0)
584 			error = EPIPE;
585 		SOCK_SENDBUF_UNLOCK(so_dst);
586 	}
587 	if (error == 0)
588 		*lenp = len;
589 	return (error);
590 }
591 
592 /*
593  * Transfer data from the source to the sink.
594  */
595 static void
596 so_splice_xfer(struct so_splice *sp)
597 {
598 	struct socket *so_src, *so_dst;
599 	off_t max;
600 	ssize_t len;
601 	int error;
602 
603 	mtx_assert(&sp->mtx, MA_OWNED);
604 	KASSERT(sp->state == SPLICE_QUEUED || sp->state == SPLICE_CLOSING,
605 	    ("so_splice_xfer: invalid state %d", sp->state));
606 	KASSERT(sp->max != 0, ("so_splice_xfer: max == 0"));
607 
608 	if (sp->state == SPLICE_CLOSING) {
609 		/* Userspace asked us to close the splice. */
610 		goto closing;
611 	}
612 
613 	sp->state = SPLICE_RUNNING;
614 	so_src = sp->src;
615 	so_dst = sp->dst;
616 	max = sp->max > 0 ? sp->max - so_src->so_splice_sent : OFF_MAX;
617 	if (max < 0)
618 		max = 0;
619 
620 	/*
621 	 * Lock the sockets in order to block userspace from doing anything
622 	 * sneaky.  If an error occurs or one of the sockets can no longer
623 	 * transfer data, we will automatically unsplice.
624 	 */
625 	mtx_unlock(&sp->mtx);
626 	splice_lock_pair(so_src, so_dst);
627 
628 	error = so_splice_xfer_data(so_src, so_dst, max, &len);
629 
630 	mtx_lock(&sp->mtx);
631 
632 	/*
633 	 * Update our stats while still holding the socket locks.  This
634 	 * synchronizes with getsockopt(SO_SPLICE), see the comment there.
635 	 */
636 	if (error == 0) {
637 		KASSERT(len >= 0, ("%s: len %zd < 0", __func__, len));
638 		so_src->so_splice_sent += len;
639 	}
640 	splice_unlock_pair(so_src, so_dst);
641 
642 	switch (sp->state) {
643 	case SPLICE_CLOSING:
644 closing:
645 		sp->state = SPLICE_CLOSED;
646 		wakeup(sp);
647 		mtx_unlock(&sp->mtx);
648 		break;
649 	case SPLICE_RUNNING:
650 		if (error != 0 ||
651 		    (sp->max > 0 && so_src->so_splice_sent >= sp->max)) {
652 			sp->state = SPLICE_EXCEPTION;
653 			soref(so_src);
654 			mtx_unlock(&sp->mtx);
655 			(void)so_unsplice(so_src, false);
656 			sorele(so_src);
657 		} else {
658 			/*
659 			 * Locklessly check for additional bytes in the source's
660 			 * receive buffer and queue more work if possible.  We
661 			 * may end up queuing needless work, but that's ok, and
662 			 * if we race with a thread inserting more data into the
663 			 * buffer and observe sbavail() == 0, the splice mutex
664 			 * ensures that splice_push() will queue more work for
665 			 * us.
666 			 */
667 			if (sbavail(&so_src->so_rcv) > 0 &&
668 			    sbspace(&so_dst->so_snd) > 0) {
669 				sp->state = SPLICE_QUEUED;
670 				mtx_unlock(&sp->mtx);
671 				so_splice_dispatch_async(sp);
672 			} else {
673 				sp->state = SPLICE_IDLE;
674 				mtx_unlock(&sp->mtx);
675 			}
676 		}
677 		break;
678 	default:
679 		__assert_unreachable();
680 	}
681 }
682 
683 static void
684 socket_init(void *tag)
685 {
686 
687 	socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
688 	    NULL, NULL, UMA_ALIGN_PTR, 0);
689 	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
690 	uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
691 	EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
692 	    EVENTHANDLER_PRI_FIRST);
693 }
694 SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);
695 
696 #ifdef SOCKET_HHOOK
697 static void
698 socket_hhook_register(int subtype)
699 {
700 
701 	if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
702 	    &V_socket_hhh[subtype],
703 	    HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
704 		printf("%s: WARNING: unable to register hook\n", __func__);
705 }
706 
707 static void
708 socket_hhook_deregister(int subtype)
709 {
710 
711 	if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
712 		printf("%s: WARNING: unable to deregister hook\n", __func__);
713 }
714 
715 static void
716 socket_vnet_init(const void *unused __unused)
717 {
718 	int i;
719 
720 	/* We expect a contiguous range */
721 	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
722 		socket_hhook_register(i);
723 }
724 VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
725     socket_vnet_init, NULL);
726 
727 static void
728 socket_vnet_uninit(const void *unused __unused)
729 {
730 	int i;
731 
732 	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
733 		socket_hhook_deregister(i);
734 }
735 VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
736     socket_vnet_uninit, NULL);
737 #endif	/* SOCKET_HHOOK */
738 
739 /*
740  * Initialise maxsockets.  This SYSINIT must be run after
741  * tunable_mbinit().
742  */
743 static void
744 init_maxsockets(void *ignored)
745 {
746 
747 	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
748 	maxsockets = imax(maxsockets, maxfiles);
749 }
750 SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
751 
752 /*
753  * Sysctl to get and set the maximum global sockets limit.  Notify protocols
754  * of the change so that they can update their dependent limits as required.
755  */
756 static int
757 sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
758 {
759 	int error, newmaxsockets;
760 
761 	newmaxsockets = maxsockets;
762 	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
763 	if (error == 0 && req->newptr && newmaxsockets != maxsockets) {
764 		if (newmaxsockets > maxsockets &&
765 		    newmaxsockets <= maxfiles) {
766 			maxsockets = newmaxsockets;
767 			EVENTHANDLER_INVOKE(maxsockets_change);
768 		} else
769 			error = EINVAL;
770 	}
771 	return (error);
772 }
773 SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets,
774     CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
775     &maxsockets, 0, sysctl_maxsockets, "IU",
776     "Maximum number of sockets available");
777 
778 /*
779  * Socket operation routines.  These routines are called by the routines in
780  * sys_socket.c or from a system process, and implement the semantics of
781  * socket operations by switching out to the protocol specific routines.
782  */
783 
784 /*
785  * Get a socket structure from our zone, and initialize it.  Note that it
786  * would probably be better to allocate socket and PCB at the same time, but
787  * I'm not convinced that all the protocols can be easily modified to do
788  * this.
789  *
790  * soalloc() returns a socket with a ref count of 0.
791  */
792 static struct socket *
793 soalloc(struct vnet *vnet)
794 {
795 	struct socket *so;
796 
797 	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
798 	if (so == NULL)
799 		return (NULL);
800 #ifdef MAC
801 	if (mac_socket_init(so, M_NOWAIT) != 0) {
802 		uma_zfree(socket_zone, so);
803 		return (NULL);
804 	}
805 #endif
806 	if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
807 		uma_zfree(socket_zone, so);
808 		return (NULL);
809 	}
810 
811 	/*
812 	 * The socket locking protocol allows locking two sockets at a time;
813 	 * however, the first one must be a listening socket.  WITNESS lacks
814 	 * a feature to change class of an existing lock, so we use DUPOK.
815 	 */
816 	mtx_init(&so->so_lock, "socket", NULL, MTX_DEF | MTX_DUPOK);
817 	mtx_init(&so->so_snd_mtx, "so_snd", NULL, MTX_DEF);
818 	mtx_init(&so->so_rcv_mtx, "so_rcv", NULL, MTX_DEF);
819 	so->so_rcv.sb_sel = &so->so_rdsel;
820 	so->so_snd.sb_sel = &so->so_wrsel;
821 	sx_init(&so->so_snd_sx, "so_snd_sx");
822 	sx_init(&so->so_rcv_sx, "so_rcv_sx");
823 	TAILQ_INIT(&so->so_snd.sb_aiojobq);
824 	TAILQ_INIT(&so->so_rcv.sb_aiojobq);
825 	TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so);
826 	TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so);
827 #ifdef VIMAGE
828 	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
829 	    __func__, __LINE__, so));
830 	so->so_vnet = vnet;
831 #endif
832 #ifdef SOCKET_HHOOK
833 	/* We shouldn't need the so_global_mtx */
834 	if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
835 		/* Do we need more comprehensive error returns? */
836 		uma_zfree(socket_zone, so);
837 		return (NULL);
838 	}
839 #endif
840 	mtx_lock(&so_global_mtx);
841 	so->so_gencnt = ++so_gencnt;
842 	++numopensockets;
843 #ifdef VIMAGE
844 	vnet->vnet_sockcnt++;
845 #endif
846 	mtx_unlock(&so_global_mtx);
847 
848 	return (so);
849 }
850 
851 /*
852  * Free the storage associated with a socket at the socket layer, tear down
853  * locks, labels, etc.  All protocol state is assumed already to have been
854  * torn down (and possibly never set up) by the caller.
855  */
856 void
857 sodealloc(struct socket *so)
858 {
859 
860 	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
861 	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
862 
863 	mtx_lock(&so_global_mtx);
864 	so->so_gencnt = ++so_gencnt;
865 	--numopensockets;	/* Could be below, but faster here. */
866 #ifdef VIMAGE
867 	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
868 	    __func__, __LINE__, so));
869 	so->so_vnet->vnet_sockcnt--;
870 #endif
871 	mtx_unlock(&so_global_mtx);
872 #ifdef MAC
873 	mac_socket_destroy(so);
874 #endif
875 #ifdef SOCKET_HHOOK
876 	hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);
877 #endif
878 
879 	khelp_destroy_osd(&so->osd);
880 	if (SOLISTENING(so)) {
881 		if (so->sol_accept_filter != NULL)
882 			accept_filt_setopt(so, NULL);
883 	} else {
884 		if (so->so_rcv.sb_hiwat)
885 			(void)chgsbsize(so->so_cred->cr_uidinfo,
886 			    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
887 		if (so->so_snd.sb_hiwat)
888 			(void)chgsbsize(so->so_cred->cr_uidinfo,
889 			    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
890 		sx_destroy(&so->so_snd_sx);
891 		sx_destroy(&so->so_rcv_sx);
892 		mtx_destroy(&so->so_snd_mtx);
893 		mtx_destroy(&so->so_rcv_mtx);
894 	}
895 	crfree(so->so_cred);
896 	mtx_destroy(&so->so_lock);
897 	uma_zfree(socket_zone, so);
898 }
899 
900 /*
901  * socreate returns a socket with a ref count of 1 and a file descriptor
902  * reference.  The socket should be closed with soclose().
903  */
904 int
905 socreate(int dom, struct socket **aso, int type, int proto,
906     struct ucred *cred, struct thread *td)
907 {
908 	struct protosw *prp;
909 	struct socket *so;
910 	int error;
911 
912 	/*
913 	 * XXX: divert(4) historically abused PF_INET.  Keep this compatibility
914 	 * shim until all applications have been updated.
915 	 */
916 	if (__predict_false(dom == PF_INET && type == SOCK_RAW &&
917 	    proto == IPPROTO_DIVERT)) {
918 		dom = PF_DIVERT;
919 		printf("%s uses obsolete way to create divert(4) socket\n",
920 		    td->td_proc->p_comm);
921 	}
922 
923 	prp = pffindproto(dom, type, proto);
924 	if (prp == NULL) {
925 		/* No support for domain. */
926 		if (pffinddomain(dom) == NULL)
927 			return (EAFNOSUPPORT);
928 		/* No support for socket type. */
929 		if (proto == 0 && type != 0)
930 			return (EPROTOTYPE);
931 		return (EPROTONOSUPPORT);
932 	}
933 
934 	MPASS(prp->pr_attach);
935 
936 	if ((prp->pr_flags & PR_CAPATTACH) == 0) {
937 		if (CAP_TRACING(td))
938 			ktrcapfail(CAPFAIL_PROTO, &proto);
939 		if (IN_CAPABILITY_MODE(td))
940 			return (ECAPMODE);
941 	}
942 
943 	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
944 		return (EPROTONOSUPPORT);
945 
946 	so = soalloc(CRED_TO_VNET(cred));
947 	if (so == NULL)
948 		return (ENOBUFS);
949 
950 	so->so_type = type;
951 	so->so_cred = crhold(cred);
952 	if ((prp->pr_domain->dom_family == PF_INET) ||
953 	    (prp->pr_domain->dom_family == PF_INET6) ||
954 	    (prp->pr_domain->dom_family == PF_ROUTE))
955 		so->so_fibnum = td->td_proc->p_fibnum;
956 	else
957 		so->so_fibnum = 0;
958 	so->so_proto = prp;
959 #ifdef MAC
960 	mac_socket_create(cred, so);
961 #endif
962 	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
963 	    so_rdknl_assert_lock);
964 	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
965 	    so_wrknl_assert_lock);
966 	if ((prp->pr_flags & PR_SOCKBUF) == 0) {
967 		so->so_snd.sb_mtx = &so->so_snd_mtx;
968 		so->so_rcv.sb_mtx = &so->so_rcv_mtx;
969 	}
970 	/*
971 	 * Auto-sizing of socket buffers is managed by the protocols and
972 	 * the appropriate flags must be set in the pr_attach() method.
973 	 */
974 	CURVNET_SET(so->so_vnet);
975 	error = prp->pr_attach(so, proto, td);
976 	CURVNET_RESTORE();
977 	if (error) {
978 		sodealloc(so);
979 		return (error);
980 	}
981 	soref(so);
982 	*aso = so;
983 	return (0);
984 }
985 
986 #ifdef REGRESSION
987 static int regression_sonewconn_earlytest = 1;
988 SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
989     &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
990 #endif
991 
992 static int sooverprio = LOG_DEBUG;
993 SYSCTL_INT(_kern_ipc, OID_AUTO, sooverprio, CTLFLAG_RW,
994     &sooverprio, 0, "Log priority for listen socket overflows: 0..7 or -1 to disable");
995 
996 static struct timeval overinterval = { 60, 0 };
997 SYSCTL_TIMEVAL_SEC(_kern_ipc, OID_AUTO, sooverinterval, CTLFLAG_RW,
998     &overinterval,
999     "Delay in seconds between warnings for listen socket overflows");
1000 
1001 /*
1002  * When an attempt at a new connection is noted on a socket which supports
1003  * accept(2), the protocol has two options:
1004  * 1) Call the legacy sonewconn() function, which calls the protocol attach
1005  *    method, the same one used for socket(2).
1006  * 2) Call solisten_clone(), do an attach that is specific to a cloned
1007  *    connection, and then call solisten_enqueue().
1008  *
1009  * Note: the ref count on the socket is 0 on return.
1010  */
1011 struct socket *
1012 solisten_clone(struct socket *head)
1013 {
1014 	struct sbuf descrsb;
1015 	struct socket *so;
1016 	int len, overcount;
1017 	u_int qlen;
1018 	const char localprefix[] = "local:";
1019 	char descrbuf[SUNPATHLEN + sizeof(localprefix)];
1020 #if defined(INET6)
1021 	char addrbuf[INET6_ADDRSTRLEN];
1022 #elif defined(INET)
1023 	char addrbuf[INET_ADDRSTRLEN];
1024 #endif
1025 	bool dolog, over;
1026 
1027 	SOLISTEN_LOCK(head);
1028 	over = (head->sol_qlen > 3 * head->sol_qlimit / 2);
1029 #ifdef REGRESSION
1030 	if (regression_sonewconn_earlytest && over) {
1031 #else
1032 	if (over) {
1033 #endif
1034 		head->sol_overcount++;
1035 		dolog = (sooverprio >= 0) &&
1036 			!!ratecheck(&head->sol_lastover, &overinterval);
1037 
1038 		/*
1039 		 * If we're going to log, copy the overflow count and queue
1040 		 * length from the listen socket before dropping the lock.
1041 		 * Also, reset the overflow count.
1042 		 */
1043 		if (dolog) {
1044 			overcount = head->sol_overcount;
1045 			head->sol_overcount = 0;
1046 			qlen = head->sol_qlen;
1047 		}
1048 		SOLISTEN_UNLOCK(head);
1049 
1050 		if (dolog) {
1051 			/*
1052 			 * Try to print something descriptive about the
1053 			 * socket for the error message.
1054 			 */
1055 			sbuf_new(&descrsb, descrbuf, sizeof(descrbuf),
1056 			    SBUF_FIXEDLEN);
1057 			switch (head->so_proto->pr_domain->dom_family) {
1058 #if defined(INET) || defined(INET6)
1059 #ifdef INET
1060 			case AF_INET:
1061 #endif
1062 #ifdef INET6
1063 			case AF_INET6:
1064 				if (head->so_proto->pr_domain->dom_family ==
1065 				    AF_INET6 ||
1066 				    (sotoinpcb(head)->inp_inc.inc_flags &
1067 				    INC_ISIPV6)) {
1068 					ip6_sprintf(addrbuf,
1069 					    &sotoinpcb(head)->inp_inc.inc6_laddr);
1070 					sbuf_printf(&descrsb, "[%s]", addrbuf);
1071 				} else
1072 #endif
1073 				{
1074 #ifdef INET
1075 					inet_ntoa_r(
1076 					    sotoinpcb(head)->inp_inc.inc_laddr,
1077 					    addrbuf);
1078 					sbuf_cat(&descrsb, addrbuf);
1079 #endif
1080 				}
1081 				sbuf_printf(&descrsb, ":%hu (proto %u)",
1082 				    ntohs(sotoinpcb(head)->inp_inc.inc_lport),
1083 				    head->so_proto->pr_protocol);
1084 				break;
1085 #endif /* INET || INET6 */
1086 			case AF_UNIX:
1087 				sbuf_cat(&descrsb, localprefix);
1088 				if (sotounpcb(head)->unp_addr != NULL)
1089 					len =
1090 					    sotounpcb(head)->unp_addr->sun_len -
1091 					    offsetof(struct sockaddr_un,
1092 					    sun_path);
1093 				else
1094 					len = 0;
1095 				if (len > 0)
1096 					sbuf_bcat(&descrsb,
1097 					    sotounpcb(head)->unp_addr->sun_path,
1098 					    len);
1099 				else
1100 					sbuf_cat(&descrsb, "(unknown)");
1101 				break;
1102 			}
1103 
1104 			/*
1105 			 * If we can't print something more specific, at least
1106 			 * print the domain name.
1107 			 */
1108 			if (sbuf_finish(&descrsb) != 0 ||
1109 			    sbuf_len(&descrsb) <= 0) {
1110 				sbuf_clear(&descrsb);
1111 				sbuf_cat(&descrsb,
1112 				    head->so_proto->pr_domain->dom_name ?:
1113 				    "unknown");
1114 				sbuf_finish(&descrsb);
1115 			}
1116 			KASSERT(sbuf_len(&descrsb) > 0,
1117 			    ("%s: sbuf creation failed", __func__));
1118 			/*
1119 			 * Preserve the historic listen queue overflow log
1120 			 * message, that starts with "sonewconn:".  It has
1121 			 * been known to sysadmins for years, and the test
1122 			 * sys/kern/sonewconn_overflow also checks for it.
1123 			 */
1124 			if (head->so_cred == NULL) {
1125 				log(LOG_PRI(sooverprio),
1126 				    "sonewconn: pcb %p (%s): "
1127 				    "Listen queue overflow: %i already in "
1128 				    "queue awaiting acceptance (%d "
1129 				    "occurrences)\n", head->so_pcb,
1130 				    sbuf_data(&descrsb),
1131 				    qlen, overcount);
1132 			} else {
1133 				log(LOG_PRI(sooverprio),
1134 				    "sonewconn: pcb %p (%s): "
1135 				    "Listen queue overflow: "
1136 				    "%i already in queue awaiting acceptance "
1137 				    "(%d occurrences), euid %d, rgid %d, jail %s\n",
1138 				    head->so_pcb, sbuf_data(&descrsb), qlen,
1139 				    overcount, head->so_cred->cr_uid,
1140 				    head->so_cred->cr_rgid,
1141 				    head->so_cred->cr_prison ?
1142 					head->so_cred->cr_prison->pr_name :
1143 					"not_jailed");
1144 			}
1145 			sbuf_delete(&descrsb);
1146 
1147 			overcount = 0;
1148 		}
1149 
1150 		return (NULL);
1151 	}
1152 	SOLISTEN_UNLOCK(head);
1153 	VNET_ASSERT(head->so_vnet != NULL, ("%s: so %p vnet is NULL",
1154 	    __func__, head));
1155 	so = soalloc(head->so_vnet);
1156 	if (so == NULL) {
1157 		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
1158 		    "limit reached or out of memory\n",
1159 		    __func__, head->so_pcb);
1160 		return (NULL);
1161 	}
1162 	so->so_listen = head;
1163 	so->so_type = head->so_type;
1164 	/*
1165 	 * POSIX is ambiguous on what options an accept(2)ed socket should
1166 	 * inherit from the listener.  Words "create a new socket" may be
1167 	 * interpreted as not inheriting anything.  Best programming practice
1168 	 * for application developers is to not rely on such inheritance.
1169  * FreeBSD has historically inherited all so_options excluding
1170  * SO_ACCEPTCONN, which virtually means all SOL_SOCKET level options,
1171  * including those completely irrelevant to a newborn socket.  For
1172  * compatibility with older versions we will inherit a list of
1173  * meaningful options.
1174  * The crucial bit to inherit is SO_ACCEPTFILTER.  It must be present
1175  * in the child socket so that soisconnected() can promote the socket
1176  * from the incomplete queue to the complete one.  It will be cleared
1177  * before the child becomes available to accept(2).
1178 	 */
1179 	so->so_options = head->so_options & (SO_ACCEPTFILTER | SO_KEEPALIVE |
1180 	    SO_DONTROUTE | SO_LINGER | SO_OOBINLINE | SO_NOSIGPIPE);
1181 	so->so_linger = head->so_linger;
1182 	so->so_state = head->so_state;
1183 	so->so_fibnum = head->so_fibnum;
1184 	so->so_proto = head->so_proto;
1185 	so->so_cred = crhold(head->so_cred);
1186 #ifdef SOCKET_HHOOK
1187 	if (V_socket_hhh[HHOOK_SOCKET_NEWCONN]->hhh_nhooks > 0) {
1188 		if (hhook_run_socket(so, head, HHOOK_SOCKET_NEWCONN)) {
1189 			sodealloc(so);
1190 			log(LOG_DEBUG, "%s: hhook run failed\n", __func__);
1191 			return (NULL);
1192 		}
1193 	}
1194 #endif
1195 #ifdef MAC
1196 	mac_socket_newconn(head, so);
1197 #endif
1198 	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
1199 	    so_rdknl_assert_lock);
1200 	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
1201 	    so_wrknl_assert_lock);
1202 	VNET_SO_ASSERT(head);
1203 	if (soreserve(so, head->sol_sbsnd_hiwat, head->sol_sbrcv_hiwat)) {
1204 		sodealloc(so);
1205 		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
1206 		    __func__, head->so_pcb);
1207 		return (NULL);
1208 	}
1209 	so->so_rcv.sb_lowat = head->sol_sbrcv_lowat;
1210 	so->so_snd.sb_lowat = head->sol_sbsnd_lowat;
1211 	so->so_rcv.sb_timeo = head->sol_sbrcv_timeo;
1212 	so->so_snd.sb_timeo = head->sol_sbsnd_timeo;
1213 	so->so_rcv.sb_flags = head->sol_sbrcv_flags & SB_AUTOSIZE;
1214 	so->so_snd.sb_flags = head->sol_sbsnd_flags & SB_AUTOSIZE;
1215 	if ((so->so_proto->pr_flags & PR_SOCKBUF) == 0) {
1216 		so->so_snd.sb_mtx = &so->so_snd_mtx;
1217 		so->so_rcv.sb_mtx = &so->so_rcv_mtx;
1218 	}
1219 
1220 	return (so);
1221 }
1222 
1223 /* Connstatus may be 0 or SS_ISCONNECTED. */
1224 struct socket *
1225 sonewconn(struct socket *head, int connstatus)
1226 {
1227 	struct socket *so;
1228 
1229 	if ((so = solisten_clone(head)) == NULL)
1230 		return (NULL);
1231 
1232 	if (so->so_proto->pr_attach(so, 0, NULL) != 0) {
1233 		sodealloc(so);
1234 		log(LOG_DEBUG, "%s: pcb %p: pr_attach() failed\n",
1235 		    __func__, head->so_pcb);
1236 		return (NULL);
1237 	}
1238 
1239 	(void)solisten_enqueue(so, connstatus);
1240 
1241 	return (so);
1242 }
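
/*
 * For contrast with sonewconn(), a protocol using the two-step path (option
 * 2 in the comment above solisten_clone()) would look roughly like the
 * following hedged sketch, with the protocol-specific attach abbreviated:
 *
 *	struct socket *so;
 *
 *	if ((so = solisten_clone(head)) == NULL)
 *		return (NULL);
 *	... protocol-specific attach of 'so', e.g. PCB allocation ...
 *	(void)solisten_enqueue(so, connstatus);
 */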
1243 
1244 /*
1245  * Enqueue socket cloned by solisten_clone() to the listen queue of the
1246  * listener it has been cloned from.
1247  *
1248  * Return 'true' if socket landed on complete queue, otherwise 'false'.
1249  */
1250 bool
1251 solisten_enqueue(struct socket *so, int connstatus)
1252 {
1253 	struct socket *head = so->so_listen;
1254 
1255 	MPASS(refcount_load(&so->so_count) == 0);
1256 	refcount_init(&so->so_count, 1);
1257 
1258 	SOLISTEN_LOCK(head);
1259 	if (head->sol_accept_filter != NULL)
1260 		connstatus = 0;
1261 	so->so_state |= connstatus;
1262 	soref(head); /* A socket on (in)complete queue refs head. */
1263 	if (connstatus) {
1264 		TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
1265 		so->so_qstate = SQ_COMP;
1266 		head->sol_qlen++;
1267 		solisten_wakeup(head);	/* unlocks */
1268 		return (true);
1269 	} else {
1270 		/*
1271 		 * Keep removing sockets from the head until there's room for
1272 		 * us to insert on the tail.  In pre-locking revisions, this
1273 		 * was a simple if(), but as we could be racing with other
1274 		 * threads and soabort() requires dropping locks, we must
1275 		 * loop waiting for the condition to be true.
1276 		 */
1277 		while (head->sol_incqlen > head->sol_qlimit) {
1278 			struct socket *sp;
1279 
1280 			sp = TAILQ_FIRST(&head->sol_incomp);
1281 			TAILQ_REMOVE(&head->sol_incomp, sp, so_list);
1282 			head->sol_incqlen--;
1283 			SOCK_LOCK(sp);
1284 			sp->so_qstate = SQ_NONE;
1285 			sp->so_listen = NULL;
1286 			SOCK_UNLOCK(sp);
1287 			sorele_locked(head);	/* does SOLISTEN_UNLOCK, head stays */
1288 			soabort(sp);
1289 			SOLISTEN_LOCK(head);
1290 		}
1291 		TAILQ_INSERT_TAIL(&head->sol_incomp, so, so_list);
1292 		so->so_qstate = SQ_INCOMP;
1293 		head->sol_incqlen++;
1294 		SOLISTEN_UNLOCK(head);
1295 		return (false);
1296 	}
1297 }
1298 
1299 #if defined(SCTP) || defined(SCTP_SUPPORT)
1300 /*
1301  * Socket part of sctp_peeloff().  Detach a new socket from an
1302  * association.  The new socket is returned with a reference.
1303  *
1304  * XXXGL: reduce copy-paste with solisten_clone().
1305  */
1306 struct socket *
1307 sopeeloff(struct socket *head)
1308 {
1309 	struct socket *so;
1310 
1311 	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
1312 	    __func__, __LINE__, head));
1313 	so = soalloc(head->so_vnet);
1314 	if (so == NULL) {
1315 		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
1316 		    "limit reached or out of memory\n",
1317 		    __func__, head->so_pcb);
1318 		return (NULL);
1319 	}
1320 	so->so_type = head->so_type;
1321 	so->so_options = head->so_options;
1322 	so->so_linger = head->so_linger;
1323 	so->so_state = (head->so_state & SS_NBIO) | SS_ISCONNECTED;
1324 	so->so_fibnum = head->so_fibnum;
1325 	so->so_proto = head->so_proto;
1326 	so->so_cred = crhold(head->so_cred);
1327 #ifdef MAC
1328 	mac_socket_newconn(head, so);
1329 #endif
1330 	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
1331 	    so_rdknl_assert_lock);
1332 	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
1333 	    so_wrknl_assert_lock);
1334 	VNET_SO_ASSERT(head);
1335 	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
1336 		sodealloc(so);
1337 		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
1338 		    __func__, head->so_pcb);
1339 		return (NULL);
1340 	}
1341 	if (so->so_proto->pr_attach(so, 0, NULL)) {
1342 		sodealloc(so);
1343 		log(LOG_DEBUG, "%s: pcb %p: pr_attach() failed\n",
1344 		    __func__, head->so_pcb);
1345 		return (NULL);
1346 	}
1347 	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
1348 	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
1349 	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
1350 	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
1351 	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
1352 	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
1353 	if ((so->so_proto->pr_flags & PR_SOCKBUF) == 0) {
1354 		so->so_snd.sb_mtx = &so->so_snd_mtx;
1355 		so->so_rcv.sb_mtx = &so->so_rcv_mtx;
1356 	}
1357 
1358 	soref(so);
1359 
1360 	return (so);
1361 }
1362 #endif	/* SCTP */
1363 
1364 int
1365 sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
1366 {
1367 	int error;
1368 
1369 	CURVNET_SET(so->so_vnet);
1370 	error = so->so_proto->pr_bind(so, nam, td);
1371 	CURVNET_RESTORE();
1372 	return (error);
1373 }
1374 
1375 int
1376 sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
1377 {
1378 	int error;
1379 
1380 	CURVNET_SET(so->so_vnet);
1381 	error = so->so_proto->pr_bindat(fd, so, nam, td);
1382 	CURVNET_RESTORE();
1383 	return (error);
1384 }
1385 
1386 /*
1387  * solisten() transitions a socket from a non-listening state to a listening
1388  * state, but can also be used to update the listen queue depth on an
1389  * existing listen socket.  The protocol will call back into the sockets
1390  * layer using solisten_proto_check() and solisten_proto() to check and set
1391  * socket-layer listen state.  Call backs are used so that the protocol can
1392  * acquire both protocol and socket layer locks in whatever order is required
1393  * by the protocol.
1394  *
1395  * Protocol implementors are advised to hold the socket lock across the
1396  * socket-layer test and set to avoid races at the socket layer.
1397  */
1398 int
1399 solisten(struct socket *so, int backlog, struct thread *td)
1400 {
1401 	int error;
1402 
1403 	CURVNET_SET(so->so_vnet);
1404 	error = so->so_proto->pr_listen(so, backlog, td);
1405 	CURVNET_RESTORE();
1406 	return (error);
1407 }
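
/*
 * A pr_listen implementation is expected to use the callbacks roughly as in
 * the following hedged sketch, modeled on the pattern described above with
 * the protocol-specific setup elided:
 *
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);
 *	if (error == 0) {
 *		... protocol-specific listen setup, which may set error ...
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		else
 *			solisten_proto_abort(so);
 *	}
 *	SOCK_UNLOCK(so);
 *	return (error);
 */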
1408 
1409 /*
1410  * Prepare for a call to solisten_proto().  Acquire all socket buffer locks in
1411  * order to interlock with socket I/O.
1412  */
1413 int
1414 solisten_proto_check(struct socket *so)
1415 {
1416 	SOCK_LOCK_ASSERT(so);
1417 
1418 	if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
1419 	    SS_ISDISCONNECTING)) != 0)
1420 		return (EINVAL);
1421 
1422 	/*
1423 	 * Sleeping is not permitted here, so simply fail if userspace is
1424 	 * attempting to transmit or receive on the socket.  This kind of
1425 	 * transient failure is not ideal, but it should occur only if userspace
1426 	 * is misusing the socket interfaces.
1427 	 */
1428 	if (!sx_try_xlock(&so->so_snd_sx))
1429 		return (EAGAIN);
1430 	if (!sx_try_xlock(&so->so_rcv_sx)) {
1431 		sx_xunlock(&so->so_snd_sx);
1432 		return (EAGAIN);
1433 	}
1434 	mtx_lock(&so->so_snd_mtx);
1435 	mtx_lock(&so->so_rcv_mtx);
1436 
1437 	/* Interlock with soo_aio_queue() and KTLS. */
1438 	if (!SOLISTENING(so)) {
1439 		bool ktls;
1440 
1441 #ifdef KERN_TLS
1442 		ktls = so->so_snd.sb_tls_info != NULL ||
1443 		    so->so_rcv.sb_tls_info != NULL;
1444 #else
1445 		ktls = false;
1446 #endif
1447 		if (ktls ||
1448 		    (so->so_snd.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0 ||
1449 		    (so->so_rcv.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0) {
1450 			solisten_proto_abort(so);
1451 			return (EINVAL);
1452 		}
1453 	}
1454 
1455 	return (0);
1456 }
1457 
1458 /*
1459  * Undo the setup done by solisten_proto_check().
1460  */
1461 void
1462 solisten_proto_abort(struct socket *so)
1463 {
1464 	mtx_unlock(&so->so_snd_mtx);
1465 	mtx_unlock(&so->so_rcv_mtx);
1466 	sx_xunlock(&so->so_snd_sx);
1467 	sx_xunlock(&so->so_rcv_sx);
1468 }
1469 
1470 void
1471 solisten_proto(struct socket *so, int backlog)
1472 {
1473 	int sbrcv_lowat, sbsnd_lowat;
1474 	u_int sbrcv_hiwat, sbsnd_hiwat;
1475 	short sbrcv_flags, sbsnd_flags;
1476 	sbintime_t sbrcv_timeo, sbsnd_timeo;
1477 
1478 	SOCK_LOCK_ASSERT(so);
1479 	KASSERT((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
1480 	    SS_ISDISCONNECTING)) == 0,
1481 	    ("%s: bad socket state %p", __func__, so));
1482 
1483 	if (SOLISTENING(so))
1484 		goto listening;
1485 
1486 	/*
1487 	 * Change this socket to listening state.
1488 	 */
1489 	sbrcv_lowat = so->so_rcv.sb_lowat;
1490 	sbsnd_lowat = so->so_snd.sb_lowat;
1491 	sbrcv_hiwat = so->so_rcv.sb_hiwat;
1492 	sbsnd_hiwat = so->so_snd.sb_hiwat;
1493 	sbrcv_flags = so->so_rcv.sb_flags;
1494 	sbsnd_flags = so->so_snd.sb_flags;
1495 	sbrcv_timeo = so->so_rcv.sb_timeo;
1496 	sbsnd_timeo = so->so_snd.sb_timeo;
1497 
1498 #ifdef MAC
1499 	mac_socketpeer_label_free(so->so_peerlabel);
1500 #endif
1501 
1502 	if (!(so->so_proto->pr_flags & PR_SOCKBUF)) {
1503 		sbdestroy(so, SO_SND);
1504 		sbdestroy(so, SO_RCV);
1505 	}
1506 
1507 #ifdef INVARIANTS
1508 	bzero(&so->so_rcv,
1509 	    sizeof(struct socket) - offsetof(struct socket, so_rcv));
1510 #endif
1511 
1512 	so->sol_sbrcv_lowat = sbrcv_lowat;
1513 	so->sol_sbsnd_lowat = sbsnd_lowat;
1514 	so->sol_sbrcv_hiwat = sbrcv_hiwat;
1515 	so->sol_sbsnd_hiwat = sbsnd_hiwat;
1516 	so->sol_sbrcv_flags = sbrcv_flags;
1517 	so->sol_sbsnd_flags = sbsnd_flags;
1518 	so->sol_sbrcv_timeo = sbrcv_timeo;
1519 	so->sol_sbsnd_timeo = sbsnd_timeo;
1520 
1521 	so->sol_qlen = so->sol_incqlen = 0;
1522 	TAILQ_INIT(&so->sol_incomp);
1523 	TAILQ_INIT(&so->sol_comp);
1524 
1525 	so->sol_accept_filter = NULL;
1526 	so->sol_accept_filter_arg = NULL;
1527 	so->sol_accept_filter_str = NULL;
1528 
1529 	so->sol_upcall = NULL;
1530 	so->sol_upcallarg = NULL;
1531 
1532 	so->so_options |= SO_ACCEPTCONN;
1533 
1534 listening:
1535 	if (backlog < 0 || backlog > V_somaxconn)
1536 		backlog = V_somaxconn;
1537 	so->sol_qlimit = backlog;
1538 
1539 	mtx_unlock(&so->so_snd_mtx);
1540 	mtx_unlock(&so->so_rcv_mtx);
1541 	sx_xunlock(&so->so_snd_sx);
1542 	sx_xunlock(&so->so_rcv_sx);
1543 }
1544 
1545 /*
1546  * Wakeup listeners/subsystems once we have a complete connection.
1547  * Enters with lock, returns unlocked.
1548  */
1549 void
1550 solisten_wakeup(struct socket *sol)
1551 {
1552 
1553 	if (sol->sol_upcall != NULL)
1554 		(void)sol->sol_upcall(sol, sol->sol_upcallarg, M_NOWAIT);
1555 	else {
1556 		selwakeuppri(&sol->so_rdsel, PSOCK);
1557 		KNOTE_LOCKED(&sol->so_rdsel.si_note, 0);
1558 	}
1559 	SOLISTEN_UNLOCK(sol);
1560 	wakeup_one(&sol->sol_comp);
1561 	if ((sol->so_state & SS_ASYNC) && sol->so_sigio != NULL)
1562 		pgsigio(&sol->so_sigio, SIGIO, 0);
1563 }
1564 
1565 /*
1566  * Return a single connection off a listening socket queue.  The main
1567  * consumer of the function is kern_accept4().  Some modules that do their
1568  * own accept management also use the function.  The socket reference held
1569  * by the listen queue is handed to the caller.
1570  *
1571  * The listening socket must be locked on entry and is unlocked on
1572  * return.
1573  * The flags argument is a set of accept4(2) flags and ACCEPT4_INHERIT.
1574  */
1575 int
1576 solisten_dequeue(struct socket *head, struct socket **ret, int flags)
1577 {
1578 	struct socket *so;
1579 	int error;
1580 
1581 	SOLISTEN_LOCK_ASSERT(head);
1582 
1583 	while (!(head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp) &&
1584 	    head->so_error == 0) {
1585 		error = msleep(&head->sol_comp, SOCK_MTX(head), PSOCK | PCATCH,
1586 		    "accept", 0);
1587 		if (error != 0) {
1588 			SOLISTEN_UNLOCK(head);
1589 			return (error);
1590 		}
1591 	}
1592 	if (head->so_error) {
1593 		error = head->so_error;
1594 		head->so_error = 0;
1595 	} else if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp))
1596 		error = EWOULDBLOCK;
1597 	else
1598 		error = 0;
1599 	if (error) {
1600 		SOLISTEN_UNLOCK(head);
1601 		return (error);
1602 	}
1603 	so = TAILQ_FIRST(&head->sol_comp);
1604 	SOCK_LOCK(so);
1605 	KASSERT(so->so_qstate == SQ_COMP,
1606 	    ("%s: so %p not SQ_COMP", __func__, so));
1607 	head->sol_qlen--;
1608 	so->so_qstate = SQ_NONE;
1609 	so->so_listen = NULL;
1610 	TAILQ_REMOVE(&head->sol_comp, so, so_list);
1611 	if (flags & ACCEPT4_INHERIT)
1612 		so->so_state |= (head->so_state & SS_NBIO);
1613 	else
1614 		so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
1615 	SOCK_UNLOCK(so);
1616 	sorele_locked(head);
1617 
1618 	*ret = so;
1619 	return (0);
1620 }
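
/*
 * A hedged sketch of a consumer doing its own accept management (compare
 * kern_accept4()); the listening socket must be locked before the call, and
 * the dequeued socket carries the reference previously held by the queue:
 *
 *	struct socket *so;
 *	int error;
 *
 *	SOLISTEN_LOCK(head);
 *	error = solisten_dequeue(head, &so, 0);
 *	if (error != 0)
 *		return (error);
 *	... use 'so'; a later soclose(so) releases the reference ...
 */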
1621 
1622 static struct so_splice *
1623 so_splice_alloc(off_t max)
1624 {
1625 	struct so_splice *sp;
1626 
1627 	sp = uma_zalloc(splice_zone, M_WAITOK);
1628 	sp->src = NULL;
1629 	sp->dst = NULL;
1630 	sp->max = max > 0 ? max : -1;
1631 	do {
1632 		sp->wq_index = atomic_fetchadd_32(&splice_index, 1) %
1633 		    (mp_maxid + 1);
1634 	} while (CPU_ABSENT(sp->wq_index));
1635 	sp->state = SPLICE_INIT;
1636 	TIMEOUT_TASK_INIT(taskqueue_thread, &sp->timeout, 0, so_splice_timeout,
1637 	    sp);
1638 	return (sp);
1639 }
1640 
1641 static void
1642 so_splice_free(struct so_splice *sp)
1643 {
1644 	KASSERT(sp->state == SPLICE_CLOSED,
1645 	    ("so_splice_free: sp %p not closed", sp));
1646 	uma_zfree(splice_zone, sp);
1647 }
1648 
1649 static void
1650 so_splice_timeout(void *arg, int pending __unused)
1651 {
1652 	struct so_splice *sp;
1653 
1654 	sp = arg;
1655 	(void)so_unsplice(sp->src, true);
1656 }
1657 
1658 /*
1659  * Splice the output from so to the input of so2.
1660  */
1661 static int
1662 so_splice(struct socket *so, struct socket *so2, struct splice *splice)
1663 {
1664 	struct so_splice *sp;
1665 	int error;
1666 
1667 	if (splice->sp_max < 0)
1668 		return (EINVAL);
1669 	/* Handle only TCP for now; TODO: other streaming protos */
1670 	if (so->so_proto->pr_protocol != IPPROTO_TCP ||
1671 	    so2->so_proto->pr_protocol != IPPROTO_TCP)
1672 		return (EPROTONOSUPPORT);
1673 	if (so->so_vnet != so2->so_vnet)
1674 		return (EINVAL);
1675 
1676 	/* so_splice_xfer() assumes that we're using these implementations. */
1677 	KASSERT(so->so_proto->pr_sosend == sosend_generic,
1678 	    ("so_splice: sosend not sosend_generic"));
1679 	KASSERT(so2->so_proto->pr_soreceive == soreceive_generic ||
1680 	    so2->so_proto->pr_soreceive == soreceive_stream,
1681 	    ("so_splice: soreceive not soreceive_generic/stream"));
1682 
1683 	sp = so_splice_alloc(splice->sp_max);
1684 	so->so_splice_sent = 0;
1685 	sp->src = so;
1686 	sp->dst = so2;
1687 
1688 	error = 0;
1689 	SOCK_LOCK(so);
1690 	if (SOLISTENING(so))
1691 		error = EINVAL;
1692 	else if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0)
1693 		error = ENOTCONN;
1694 	else if (so->so_splice != NULL)
1695 		error = EBUSY;
1696 	if (error != 0) {
1697 		SOCK_UNLOCK(so);
1698 		uma_zfree(splice_zone, sp);
1699 		return (error);
1700 	}
1701 	SOCK_RECVBUF_LOCK(so);
1702 	if (so->so_rcv.sb_tls_info != NULL) {
1703 		SOCK_RECVBUF_UNLOCK(so);
1704 		SOCK_UNLOCK(so);
1705 		uma_zfree(splice_zone, sp);
1706 		return (EINVAL);
1707 	}
1708 	so->so_rcv.sb_flags |= SB_SPLICED;
1709 	so->so_splice = sp;
1710 	soref(so);
1711 	SOCK_RECVBUF_UNLOCK(so);
1712 	SOCK_UNLOCK(so);
1713 
1714 	error = 0;
1715 	SOCK_LOCK(so2);
1716 	if (SOLISTENING(so2))
1717 		error = EINVAL;
1718 	else if ((so2->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0)
1719 		error = ENOTCONN;
1720 	else if (so2->so_splice_back != NULL)
1721 		error = EBUSY;
1722 	if (error != 0) {
1723 		SOCK_UNLOCK(so2);
1724 		so_unsplice(so, false);
1725 		return (error);
1726 	}
1727 	SOCK_SENDBUF_LOCK(so2);
1728 	if (so2->so_snd.sb_tls_info != NULL) {
1729 		SOCK_SENDBUF_UNLOCK(so2);
1730 		SOCK_UNLOCK(so2);
1731 		so_unsplice(so, false);
1732 		return (EINVAL);
1733 	}
1734 	so2->so_snd.sb_flags |= SB_SPLICED;
1735 	so2->so_splice_back = sp;
1736 	soref(so2);
1737 	mtx_lock(&sp->mtx);
1738 	SOCK_SENDBUF_UNLOCK(so2);
1739 	SOCK_UNLOCK(so2);
1740 
1741 	if (splice->sp_idle.tv_sec != 0 || splice->sp_idle.tv_usec != 0) {
1742 		taskqueue_enqueue_timeout_sbt(taskqueue_thread, &sp->timeout,
1743 		    tvtosbt(splice->sp_idle), 0, C_PREL(4));
1744 	}
1745 
1746 	/*
1747 	 * Transfer any data already present in the socket buffer.
1748 	 */
1749 	KASSERT(sp->state == SPLICE_INIT,
1750 	    ("so_splice: splice %p state %d", sp, sp->state));
1751 	sp->state = SPLICE_QUEUED;
1752 	so_splice_xfer(sp);
1753 	return (0);
1754 }
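
/*
 * Illustrative sketch (not part of the compiled source): userspace reaches
 * so_splice() through the SO_SPLICE socket option.  Assuming the struct
 * splice layout used above (sp_max byte limit, sp_idle idle timeout) plus
 * an sp_fd member naming the drain socket, a caller might do:
 *
 *	struct splice sp;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.sp_fd = out_fd;	(connected TCP socket to drain into)
 *	sp.sp_max = 0;		(no byte limit)
 *	if (setsockopt(in_fd, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp)) == -1)
 *		err(1, "SO_SPLICE");
 *
 * Both sockets must be connected TCP sockets in the same vnet, per the
 * checks at the top of so_splice().
 */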
1755 
1756 static int
1757 so_unsplice(struct socket *so, bool timeout)
1758 {
1759 	struct socket *so2;
1760 	struct so_splice *sp;
1761 	bool drain, so2rele;
1762 
1763 	/*
1764 	 * First unset SB_SPLICED and hide the splice structure so that
1765 	 * wakeup routines will stop enqueuing work.  This also ensures that
1766 	 * a only a single thread will proceed with the unsplice.
1767 	 * only a single thread will proceed with the unsplice.
1768 	SOCK_LOCK(so);
1769 	if (SOLISTENING(so)) {
1770 		SOCK_UNLOCK(so);
1771 		return (EINVAL);
1772 	}
1773 	SOCK_RECVBUF_LOCK(so);
1774 	if ((so->so_rcv.sb_flags & SB_SPLICED) == 0) {
1775 		SOCK_RECVBUF_UNLOCK(so);
1776 		SOCK_UNLOCK(so);
1777 		return (ENOTCONN);
1778 	}
1779 	sp = so->so_splice;
1780 	mtx_lock(&sp->mtx);
1781 	if (sp->state == SPLICE_INIT) {
1782 		/*
1783 		 * A splice is in the middle of being set up.
1784 		 */
1785 		mtx_unlock(&sp->mtx);
1786 		SOCK_RECVBUF_UNLOCK(so);
1787 		SOCK_UNLOCK(so);
1788 		return (ENOTCONN);
1789 	}
1790 	mtx_unlock(&sp->mtx);
1791 	so->so_rcv.sb_flags &= ~SB_SPLICED;
1792 	so->so_splice = NULL;
1793 	SOCK_RECVBUF_UNLOCK(so);
1794 	SOCK_UNLOCK(so);
1795 
1796 	so2 = sp->dst;
1797 	SOCK_LOCK(so2);
1798 	KASSERT(!SOLISTENING(so2), ("%s: so2 is listening", __func__));
1799 	SOCK_SENDBUF_LOCK(so2);
1800 	KASSERT(sp->state == SPLICE_INIT ||
1801 	    (so2->so_snd.sb_flags & SB_SPLICED) != 0,
1802 	    ("%s: so2 is not spliced", __func__));
1803 	KASSERT(sp->state == SPLICE_INIT ||
1804 	    so2->so_splice_back == sp,
1805 	    ("%s: so_splice_back != sp", __func__));
1806 	so2->so_snd.sb_flags &= ~SB_SPLICED;
1807 	so2rele = so2->so_splice_back != NULL;
1808 	so2->so_splice_back = NULL;
1809 	SOCK_SENDBUF_UNLOCK(so2);
1810 	SOCK_UNLOCK(so2);
1811 
1812 	/*
1813 	 * No new work is being enqueued.  The worker thread might be
1814 	 * splicing data right now, in which case we want to wait for it to
1815 	 * finish before proceeding.
1816 	 */
1817 	mtx_lock(&sp->mtx);
1818 	switch (sp->state) {
1819 	case SPLICE_QUEUED:
1820 	case SPLICE_RUNNING:
1821 		sp->state = SPLICE_CLOSING;
1822 		while (sp->state == SPLICE_CLOSING)
1823 			msleep(sp, &sp->mtx, PSOCK, "unsplice", 0);
1824 		break;
1825 	case SPLICE_INIT:
1826 	case SPLICE_IDLE:
1827 	case SPLICE_EXCEPTION:
1828 		sp->state = SPLICE_CLOSED;
1829 		break;
1830 	default:
1831 		__assert_unreachable();
1832 	}
1833 	if (!timeout) {
1834 		drain = taskqueue_cancel_timeout(taskqueue_thread, &sp->timeout,
1835 		    NULL) != 0;
1836 	} else {
1837 		drain = false;
1838 	}
1839 	mtx_unlock(&sp->mtx);
1840 	if (drain)
1841 		taskqueue_drain_timeout(taskqueue_thread, &sp->timeout);
1842 
1843 	/*
1844 	 * Now we hold the sole reference to the splice structure.
1845 	 * Clean up: signal userspace and release socket references.
1846 	 */
1847 	sorwakeup(so);
1848 	CURVNET_SET(so->so_vnet);
1849 	sorele(so);
1850 	sowwakeup(so2);
1851 	if (so2rele)
1852 		sorele(so2);
1853 	CURVNET_RESTORE();
1854 	so_splice_free(sp);
1855 	return (0);
1856 }
1857 
1858 /*
1859  * Free socket upon release of the very last reference.
1860  */
1861 static void
1862 sofree(struct socket *so)
1863 {
1864 	struct protosw *pr = so->so_proto;
1865 
1866 	SOCK_LOCK_ASSERT(so);
1867 	KASSERT(refcount_load(&so->so_count) == 0,
1868 	    ("%s: so %p has references", __func__, so));
1869 	KASSERT(SOLISTENING(so) || so->so_qstate == SQ_NONE,
1870 	    ("%s: so %p is on listen queue", __func__, so));
1871 	KASSERT(SOLISTENING(so) || (so->so_rcv.sb_flags & SB_SPLICED) == 0,
1872 	    ("%s: so %p rcvbuf is spliced", __func__, so));
1873 	KASSERT(SOLISTENING(so) || (so->so_snd.sb_flags & SB_SPLICED) == 0,
1874 	    ("%s: so %p sndbuf is spliced", __func__, so));
1875 	KASSERT(so->so_splice == NULL && so->so_splice_back == NULL,
1876 	    ("%s: so %p has spliced data", __func__, so));
1877 
1878 	SOCK_UNLOCK(so);
1879 
1880 	if (so->so_dtor != NULL)
1881 		so->so_dtor(so);
1882 
1883 	VNET_SO_ASSERT(so);
1884 	if (pr->pr_detach != NULL)
1885 		pr->pr_detach(so);
1886 
1887 	if (!(pr->pr_flags & PR_SOCKBUF) && !SOLISTENING(so)) {
1888 		/*
1889 		 * From this point on, we assume that no other references to
1890 		 * this socket exist anywhere else in the stack.  Therefore,
1891 		 * no locks need to be acquired or held.
1892 		 */
1893 #ifdef INVARIANTS
1894 		SOCK_SENDBUF_LOCK(so);
1895 		SOCK_RECVBUF_LOCK(so);
1896 #endif
1897 		sbdestroy(so, SO_SND);
1898 		sbdestroy(so, SO_RCV);
1899 #ifdef INVARIANTS
1900 		SOCK_SENDBUF_UNLOCK(so);
1901 		SOCK_RECVBUF_UNLOCK(so);
1902 #endif
1903 	}
1904 	seldrain(&so->so_rdsel);
1905 	seldrain(&so->so_wrsel);
1906 	knlist_destroy(&so->so_rdsel.si_note);
1907 	knlist_destroy(&so->so_wrsel.si_note);
1908 	sodealloc(so);
1909 }
1910 
1911 /*
1912  * Release a reference on a socket while holding the socket lock.
1913  * Unlocks the socket lock before returning.
1914  */
1915 void
1916 sorele_locked(struct socket *so)
1917 {
1918 	SOCK_LOCK_ASSERT(so);
1919 	if (refcount_release(&so->so_count))
1920 		sofree(so);
1921 	else
1922 		SOCK_UNLOCK(so);
1923 }
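
/*
 * Usage sketch: callers that already hold the socket lock hand it to this
 * function, which always consumes it:
 *
 *	SOCK_LOCK(so);
 *	sorele_locked(so);	(no SOCK_UNLOCK(); the lock was consumed)
 *
 * The sorele() wrapper used elsewhere in this file takes SOCK_LOCK(so)
 * itself before calling here.
 */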
1924 
1925 /*
1926  * Close a socket on last file table reference removal.  Initiate disconnect
1927  * if connected.  Free socket when disconnect complete.
1928  *
1929  * This function will sorele() the socket.  Note that soclose() may be called
1930  * prior to the ref count reaching zero.  The actual socket structure will
1931  * not be freed until the ref count reaches zero.
1932  */
1933 int
1934 soclose(struct socket *so)
1935 {
1936 	struct accept_queue lqueue;
1937 	int error = 0;
1938 	bool listening, last __diagused;
1939 
1940 	CURVNET_SET(so->so_vnet);
1941 	funsetown(&so->so_sigio);
1942 	if (so->so_state & SS_ISCONNECTED) {
1943 		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
1944 			error = sodisconnect(so);
1945 			if (error) {
1946 				if (error == ENOTCONN)
1947 					error = 0;
1948 				goto drop;
1949 			}
1950 		}
1951 
1952 		if ((so->so_options & SO_LINGER) != 0 && so->so_linger != 0) {
1953 			if ((so->so_state & SS_ISDISCONNECTING) &&
1954 			    (so->so_state & SS_NBIO))
1955 				goto drop;
1956 			while (so->so_state & SS_ISCONNECTED) {
1957 				error = tsleep(&so->so_timeo,
1958 				    PSOCK | PCATCH, "soclos",
1959 				    so->so_linger * hz);
1960 				if (error)
1961 					break;
1962 			}
1963 		}
1964 	}
1965 
1966 drop:
1967 	if (so->so_proto->pr_close != NULL)
1968 		so->so_proto->pr_close(so);
1969 
1970 	SOCK_LOCK(so);
1971 	if ((listening = SOLISTENING(so))) {
1972 		struct socket *sp;
1973 
1974 		TAILQ_INIT(&lqueue);
1975 		TAILQ_SWAP(&lqueue, &so->sol_incomp, socket, so_list);
1976 		TAILQ_CONCAT(&lqueue, &so->sol_comp, so_list);
1977 
1978 		so->sol_qlen = so->sol_incqlen = 0;
1979 
1980 		TAILQ_FOREACH(sp, &lqueue, so_list) {
1981 			SOCK_LOCK(sp);
1982 			sp->so_qstate = SQ_NONE;
1983 			sp->so_listen = NULL;
1984 			SOCK_UNLOCK(sp);
1985 			last = refcount_release(&so->so_count);
1986 			KASSERT(!last, ("%s: released last reference for %p",
1987 			    __func__, so));
1988 		}
1989 	}
1990 	sorele_locked(so);
1991 	if (listening) {
1992 		struct socket *sp, *tsp;
1993 
1994 		TAILQ_FOREACH_SAFE(sp, &lqueue, so_list, tsp)
1995 			soabort(sp);
1996 	}
1997 	CURVNET_RESTORE();
1998 	return (error);
1999 }
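
/*
 * Userspace sketch of the SO_LINGER path above: with lingering enabled,
 * close(2) sleeps in the "soclos" loop for up to l_linger seconds while
 * the disconnect completes:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *
 *	(void)setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);	(may block for up to 5 seconds)
 */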
2000 
2001 /*
2002  * soabort() is used to abruptly tear down a connection, such as when a
2003  * resource limit is reached (listen queue depth exceeded), or if a listen
2004  * socket is closed while there are sockets waiting to be accepted.
2005  *
2006  * This interface is tricky, because it is called on an unreferenced socket,
2007  * and must be called only by a thread that has actually removed the socket
2008  * from the listen queue it was on.  Likely this thread holds the last
2009  * reference on the socket and soabort() will proceed with sofree().  But
2010  * it might be not the last, as the sockets on the listen queues are seen
2011  * it might not be the last, as the sockets on the listen queues are seen
2012  *
2013  * This interface will call into the protocol code, so must not be called
2014  * with any socket locks held.  Protocols do call it while holding their own
2015  * recursible protocol mutexes, but this is something that should be subject
2016  * to review in the future.
2017  *
2018  * Usually the socket should have a single reference left, but this is not a
2019  * requirement.  In the past, when we had named references for the file
2020  * descriptor and protocol, we asserted that none of them were held.
2021  */
2022 void
2023 soabort(struct socket *so)
2024 {
2025 
2026 	VNET_SO_ASSERT(so);
2027 
2028 	if (so->so_proto->pr_abort != NULL)
2029 		so->so_proto->pr_abort(so);
2030 	SOCK_LOCK(so);
2031 	sorele_locked(so);
2032 }
2033 
2034 int
2035 soaccept(struct socket *so, struct sockaddr *sa)
2036 {
2037 #ifdef INVARIANTS
2038 	u_char len = sa->sa_len;
2039 #endif
2040 	int error;
2041 
2042 	CURVNET_SET(so->so_vnet);
2043 	error = so->so_proto->pr_accept(so, sa);
2044 	KASSERT(sa->sa_len <= len,
2045 	    ("%s: protocol %p sockaddr overflow", __func__, so->so_proto));
2046 	CURVNET_RESTORE();
2047 	return (error);
2048 }
2049 
2050 int
2051 sopeeraddr(struct socket *so, struct sockaddr *sa)
2052 {
2053 #ifdef INVARIANTS
2054 	u_char len = sa->sa_len;
2055 #endif
2056 	int error;
2057 
2058 	CURVNET_ASSERT_SET();
2059 
2060 	error = so->so_proto->pr_peeraddr(so, sa);
2061 	KASSERT(sa->sa_len <= len,
2062 	    ("%s: protocol %p sockaddr overflow", __func__, so->so_proto));
2063 
2064 	return (error);
2065 }
2066 
2067 int
2068 sosockaddr(struct socket *so, struct sockaddr *sa)
2069 {
2070 #ifdef INVARIANTS
2071 	u_char len = sa->sa_len;
2072 #endif
2073 	int error;
2074 
2075 	CURVNET_SET(so->so_vnet);
2076 	error = so->so_proto->pr_sockaddr(so, sa);
2077 	KASSERT(sa->sa_len <= len,
2078 	    ("%s: protocol %p sockaddr overflow", __func__, so->so_proto));
2079 	CURVNET_RESTORE();
2080 
2081 	return (error);
2082 }
2083 
2084 int
2085 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
2086 {
2087 
2088 	return (soconnectat(AT_FDCWD, so, nam, td));
2089 }
2090 
2091 int
2092 soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
2093 {
2094 	int error;
2095 
2096 	CURVNET_SET(so->so_vnet);
2097 
2098 	/*
2099 	 * If protocol is connection-based, can only connect once.
2100 	 * Otherwise, if connected, try to disconnect first.  This allows
2101 	 * user to disconnect by connecting to, e.g., a null address.
2102 	 *
2103 	 * Note, this check is racy and may need to be re-evaluated at the
2104 	 * protocol layer.
2105 	 */
2106 	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
2107 	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
2108 	    (error = sodisconnect(so)))) {
2109 		error = EISCONN;
2110 	} else {
2111 		/*
2112 		 * Prevent accumulated error from previous connection from
2113 		 * biting us.
2114 		 */
2115 		so->so_error = 0;
2116 		if (fd == AT_FDCWD) {
2117 			error = so->so_proto->pr_connect(so, nam, td);
2118 		} else {
2119 			error = so->so_proto->pr_connectat(fd, so, nam, td);
2120 		}
2121 	}
2122 	CURVNET_RESTORE();
2123 
2124 	return (error);
2125 }
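
/*
 * Sketch of the disconnect-by-null-address case handled above (assumed
 * userspace semantics): connecting a connected datagram socket to an
 * address with family AF_UNSPEC dissolves the association:
 *
 *	struct sockaddr sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_len = sizeof(sa);
 *	sa.sa_family = AF_UNSPEC;
 *	(void)connect(s, &sa, sizeof(sa));
 */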
2126 
2127 int
2128 soconnect2(struct socket *so1, struct socket *so2)
2129 {
2130 	int error;
2131 
2132 	CURVNET_SET(so1->so_vnet);
2133 	error = so1->so_proto->pr_connect2(so1, so2);
2134 	CURVNET_RESTORE();
2135 	return (error);
2136 }
2137 
2138 int
2139 sodisconnect(struct socket *so)
2140 {
2141 	int error;
2142 
2143 	if ((so->so_state & SS_ISCONNECTED) == 0)
2144 		return (ENOTCONN);
2145 	if (so->so_state & SS_ISDISCONNECTING)
2146 		return (EALREADY);
2147 	VNET_SO_ASSERT(so);
2148 	error = so->so_proto->pr_disconnect(so);
2149 	return (error);
2150 }
2151 
2152 int
2153 sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
2154     struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
2155 {
2156 	long space;
2157 	ssize_t resid;
2158 	int clen = 0, error, dontroute;
2159 
2160 	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
2161 	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
2162 	    ("sosend_dgram: !PR_ATOMIC"));
2163 
2164 	if (uio != NULL)
2165 		resid = uio->uio_resid;
2166 	else
2167 		resid = top->m_pkthdr.len;
2168 	/*
2169 	 * In theory resid should be unsigned.  However, space must be
2170 	 * signed, as it might be less than 0 if we over-committed, and we
2171 	 * must use a signed comparison of space and resid.  On the other
2172 	 * hand, a negative resid causes us to loop sending 0-length
2173 	 * segments to the protocol.
2174 	 */
2175 	if (resid < 0) {
2176 		error = EINVAL;
2177 		goto out;
2178 	}
2179 
2180 	dontroute =
2181 	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
2182 	if (td != NULL)
2183 		td->td_ru.ru_msgsnd++;
2184 	if (control != NULL)
2185 		clen = control->m_len;
2186 
2187 	SOCKBUF_LOCK(&so->so_snd);
2188 	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2189 		SOCKBUF_UNLOCK(&so->so_snd);
2190 		error = EPIPE;
2191 		goto out;
2192 	}
2193 	if (so->so_error) {
2194 		error = so->so_error;
2195 		so->so_error = 0;
2196 		SOCKBUF_UNLOCK(&so->so_snd);
2197 		goto out;
2198 	}
2199 	if ((so->so_state & SS_ISCONNECTED) == 0) {
2200 		/*
2201 		 * `sendto' and `sendmsg' are allowed on a connection-based
2202 		 * socket if it supports implied connect.  Return ENOTCONN if
2203 		 * not connected and no address is supplied.
2204 		 */
2205 		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
2206 		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
2207 			if (!(resid == 0 && clen != 0)) {
2208 				SOCKBUF_UNLOCK(&so->so_snd);
2209 				error = ENOTCONN;
2210 				goto out;
2211 			}
2212 		} else if (addr == NULL) {
2213 			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
2214 				error = ENOTCONN;
2215 			else
2216 				error = EDESTADDRREQ;
2217 			SOCKBUF_UNLOCK(&so->so_snd);
2218 			goto out;
2219 		}
2220 	}
2221 
2222 	/*
2223 	 * Do we need MSG_OOB support in SOCK_DGRAM?  The signedness of the
2224 	 * space calculation here may be a problem and need fixing.
2225 	 */
2226 	space = sbspace(&so->so_snd);
2227 	if (flags & MSG_OOB)
2228 		space += 1024;
2229 	space -= clen;
2230 	SOCKBUF_UNLOCK(&so->so_snd);
2231 	if (resid > space) {
2232 		error = EMSGSIZE;
2233 		goto out;
2234 	}
2235 	if (uio == NULL) {
2236 		resid = 0;
2237 		if (flags & MSG_EOR)
2238 			top->m_flags |= M_EOR;
2239 	} else {
2240 		/*
2241 		 * Copy the data from userland into an mbuf chain.
2242 		 * If no data is to be copied in, a single empty mbuf
2243 		 * is returned.
2244 		 */
2245 		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
2246 		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
2247 		if (top == NULL) {
2248 			error = EFAULT;	/* only possible error */
2249 			goto out;
2250 		}
2251 		space -= resid - uio->uio_resid;
2252 		resid = uio->uio_resid;
2253 	}
2254 	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
2255 	/*
2256 	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
2257 	 * than with.
2258 	 */
2259 	if (dontroute) {
2260 		SOCK_LOCK(so);
2261 		so->so_options |= SO_DONTROUTE;
2262 		SOCK_UNLOCK(so);
2263 	}
2264 	/*
2265 	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
2266 	 * of date.  We could have received a reset packet in an interrupt or
2267 	 * maybe we slept while doing page faults in uiomove() etc.  We could
2268 	 * probably recheck again inside the locking protection here, but
2269 	 * there are probably other places that this also happens.  We must
2270 	 * rethink this.
2271 	 */
2272 	VNET_SO_ASSERT(so);
2273 	error = so->so_proto->pr_send(so, (flags & MSG_OOB) ? PRUS_OOB :
2274 	/*
2275 	 * If the user set MSG_EOF, the protocol understands this flag, and
2276 	 * nothing is left to send, then use PRU_SEND_EOF instead of PRU_SEND.
2277 	 */
2278 	    ((flags & MSG_EOF) &&
2279 	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
2280 	     (resid <= 0)) ?
2281 		PRUS_EOF :
2282 		/* If there is more to send set PRUS_MORETOCOME */
2283 		(flags & MSG_MORETOCOME) ||
2284 		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
2285 		top, addr, control, td);
2286 	if (dontroute) {
2287 		SOCK_LOCK(so);
2288 		so->so_options &= ~SO_DONTROUTE;
2289 		SOCK_UNLOCK(so);
2290 	}
2291 	clen = 0;
2292 	control = NULL;
2293 	top = NULL;
2294 out:
2295 	if (top != NULL)
2296 		m_freem(top);
2297 	if (control != NULL)
2298 		m_freem(control);
2299 	return (error);
2300 }
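
/*
 * Example (sketch): because sosend_dgram() never waits for buffer space,
 * a datagram larger than the space computed above fails up front:
 *
 *	char big[1024 * 1024];	(assumed to exceed the send buffer)
 *
 *	if (send(s, big, sizeof(big), 0) == -1)
 *		assert(errno == EMSGSIZE);
 */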
2301 
2302 /*
2303  * Send on a socket.  If send must go all at once and message is larger than
2304  * send buffering, then hard error.  Lock against other senders.  If must go
2305  * all at once and not enough room now, then inform user that this would
2306  * block and do nothing.  Otherwise, if nonblocking, send as much as
2307  * possible.  The data to be sent is described by "uio" if nonzero, otherwise
2308  * by the mbuf chain "top" (which must be null if uio is not).  Data provided
2309  * in mbuf chain must be small enough to send all at once.
2310  *
2311  * Returns nonzero on error, timeout or signal; callers must check for short
2312  * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
2313  * on return.
2314  */
2315 static int
2316 sosend_generic_locked(struct socket *so, struct sockaddr *addr, struct uio *uio,
2317     struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
2318 {
2319 	long space;
2320 	ssize_t resid;
2321 	int clen = 0, error, dontroute;
2322 	int atomic = sosendallatonce(so) || top;
2323 	int pr_send_flag;
2324 #ifdef KERN_TLS
2325 	struct ktls_session *tls;
2326 	int tls_enq_cnt, tls_send_flag;
2327 	uint8_t tls_rtype;
2328 
2329 	tls = NULL;
2330 	tls_rtype = TLS_RLTYPE_APP;
2331 #endif
2332 
2333 	SOCK_IO_SEND_ASSERT_LOCKED(so);
2334 
2335 	if (uio != NULL)
2336 		resid = uio->uio_resid;
2337 	else if ((top->m_flags & M_PKTHDR) != 0)
2338 		resid = top->m_pkthdr.len;
2339 	else
2340 		resid = m_length(top, NULL);
2341 	/*
2342 	 * In theory resid should be unsigned.  However, space must be
2343 	 * signed, as it might be less than 0 if we over-committed, and we
2344 	 * must use a signed comparison of space and resid.  On the other
2345 	 * hand, a negative resid causes us to loop sending 0-length
2346 	 * segments to the protocol.
2347 	 *
2348 	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
2349 	 * type sockets since that's an error.
2350 	 */
2351 	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
2352 		error = EINVAL;
2353 		goto out;
2354 	}
2355 
2356 	dontroute =
2357 	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
2358 	    (so->so_proto->pr_flags & PR_ATOMIC);
2359 	if (td != NULL)
2360 		td->td_ru.ru_msgsnd++;
2361 	if (control != NULL)
2362 		clen = control->m_len;
2363 
2364 #ifdef KERN_TLS
2365 	tls_send_flag = 0;
2366 	tls = ktls_hold(so->so_snd.sb_tls_info);
2367 	if (tls != NULL) {
2368 		if (tls->mode == TCP_TLS_MODE_SW)
2369 			tls_send_flag = PRUS_NOTREADY;
2370 
2371 		if (control != NULL) {
2372 			struct cmsghdr *cm = mtod(control, struct cmsghdr *);
2373 
2374 			if (clen >= sizeof(*cm) &&
2375 			    cm->cmsg_type == TLS_SET_RECORD_TYPE) {
2376 				tls_rtype = *((uint8_t *)CMSG_DATA(cm));
2377 				clen = 0;
2378 				m_freem(control);
2379 				control = NULL;
2380 				atomic = 1;
2381 			}
2382 		}
2383 
2384 		if (resid == 0 && !ktls_permit_empty_frames(tls)) {
2385 			error = EINVAL;
2386 			goto out;
2387 		}
2388 	}
2389 #endif
2390 
2391 restart:
2392 	do {
2393 		SOCKBUF_LOCK(&so->so_snd);
2394 		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2395 			SOCKBUF_UNLOCK(&so->so_snd);
2396 			error = EPIPE;
2397 			goto out;
2398 		}
2399 		if (so->so_error) {
2400 			error = so->so_error;
2401 			so->so_error = 0;
2402 			SOCKBUF_UNLOCK(&so->so_snd);
2403 			goto out;
2404 		}
2405 		if ((so->so_state & SS_ISCONNECTED) == 0) {
2406 			/*
2407 			 * `sendto' and `sendmsg' are allowed on a connection-
2408 			 * based socket if it supports implied connect.
2409 			 * Return ENOTCONN if not connected and no address is
2410 			 * supplied.
2411 			 */
2412 			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
2413 			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
2414 				if (!(resid == 0 && clen != 0)) {
2415 					SOCKBUF_UNLOCK(&so->so_snd);
2416 					error = ENOTCONN;
2417 					goto out;
2418 				}
2419 			} else if (addr == NULL) {
2420 				SOCKBUF_UNLOCK(&so->so_snd);
2421 				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
2422 					error = ENOTCONN;
2423 				else
2424 					error = EDESTADDRREQ;
2425 				goto out;
2426 			}
2427 		}
2428 		space = sbspace(&so->so_snd);
2429 		if (flags & MSG_OOB)
2430 			space += 1024;
2431 		if ((atomic && resid > so->so_snd.sb_hiwat) ||
2432 		    clen > so->so_snd.sb_hiwat) {
2433 			SOCKBUF_UNLOCK(&so->so_snd);
2434 			error = EMSGSIZE;
2435 			goto out;
2436 		}
2437 		if (space < resid + clen &&
2438 		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
2439 			if ((so->so_state & SS_NBIO) ||
2440 			    (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) {
2441 				SOCKBUF_UNLOCK(&so->so_snd);
2442 				error = EWOULDBLOCK;
2443 				goto out;
2444 			}
2445 			error = sbwait(so, SO_SND);
2446 			SOCKBUF_UNLOCK(&so->so_snd);
2447 			if (error)
2448 				goto out;
2449 			goto restart;
2450 		}
2451 		SOCKBUF_UNLOCK(&so->so_snd);
2452 		space -= clen;
2453 		do {
2454 			if (uio == NULL) {
2455 				resid = 0;
2456 				if (flags & MSG_EOR)
2457 					top->m_flags |= M_EOR;
2458 #ifdef KERN_TLS
2459 				if (tls != NULL) {
2460 					ktls_frame(top, tls, &tls_enq_cnt,
2461 					    tls_rtype);
2462 					tls_rtype = TLS_RLTYPE_APP;
2463 				}
2464 #endif
2465 			} else {
2466 				/*
2467 				 * Copy the data from userland into an mbuf
2468 				 * chain.  If resid is 0, which can happen
2469 				 * only if we have control to send, then
2470 				 * a single empty mbuf is returned.  This
2471 				 * is a workaround to prevent protocol send
2472 				 * methods from panicking.
2473 				 */
2474 #ifdef KERN_TLS
2475 				if (tls != NULL) {
2476 					top = m_uiotombuf(uio, M_WAITOK, space,
2477 					    tls->params.max_frame_len,
2478 					    M_EXTPG |
2479 					    ((flags & MSG_EOR) ? M_EOR : 0));
2480 					if (top != NULL) {
2481 						ktls_frame(top, tls,
2482 						    &tls_enq_cnt, tls_rtype);
2483 					}
2484 					tls_rtype = TLS_RLTYPE_APP;
2485 				} else
2486 #endif
2487 					top = m_uiotombuf(uio, M_WAITOK, space,
2488 					    (atomic ? max_hdr : 0),
2489 					    (atomic ? M_PKTHDR : 0) |
2490 					    ((flags & MSG_EOR) ? M_EOR : 0));
2491 				if (top == NULL) {
2492 					error = EFAULT; /* only possible error */
2493 					goto out;
2494 				}
2495 				space -= resid - uio->uio_resid;
2496 				resid = uio->uio_resid;
2497 			}
2498 			if (dontroute) {
2499 				SOCK_LOCK(so);
2500 				so->so_options |= SO_DONTROUTE;
2501 				SOCK_UNLOCK(so);
2502 			}
2503 			/*
2504 			 * XXX all the SBS_CANTSENDMORE checks previously
2505 			 * done could be out of date.  We could have received
2506 			 * a reset packet in an interrupt or maybe we slept
2507 			 * while doing page faults in uiomove() etc.  We
2508 			 * could probably recheck again inside the locking
2509 			 * protection here, but there are probably other
2510 			 * places that this also happens.  We must rethink
2511 			 * this.
2512 			 */
2513 			VNET_SO_ASSERT(so);
2514 
2515 			pr_send_flag = (flags & MSG_OOB) ? PRUS_OOB :
2516 			/*
2517 			 * If the user set MSG_EOF, the protocol understands
2518 			 * this flag, and nothing is left to send, then use
2519 			 * PRU_SEND_EOF instead of PRU_SEND.
2520 			 */
2521 			    ((flags & MSG_EOF) &&
2522 			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
2523 			     (resid <= 0)) ?
2524 				PRUS_EOF :
2525 			/* If there is more to send set PRUS_MORETOCOME. */
2526 			    (flags & MSG_MORETOCOME) ||
2527 			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
2528 
2529 #ifdef KERN_TLS
2530 			pr_send_flag |= tls_send_flag;
2531 #endif
2532 
2533 			error = so->so_proto->pr_send(so, pr_send_flag, top,
2534 			    addr, control, td);
2535 
2536 			if (dontroute) {
2537 				SOCK_LOCK(so);
2538 				so->so_options &= ~SO_DONTROUTE;
2539 				SOCK_UNLOCK(so);
2540 			}
2541 
2542 #ifdef KERN_TLS
2543 			if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
2544 				if (error != 0) {
2545 					m_freem(top);
2546 					top = NULL;
2547 				} else {
2548 					soref(so);
2549 					ktls_enqueue(top, so, tls_enq_cnt);
2550 				}
2551 			}
2552 #endif
2553 			clen = 0;
2554 			control = NULL;
2555 			top = NULL;
2556 			if (error)
2557 				goto out;
2558 		} while (resid && space > 0);
2559 	} while (resid);
2560 
2561 out:
2562 #ifdef KERN_TLS
2563 	if (tls != NULL)
2564 		ktls_free(tls);
2565 #endif
2566 	if (top != NULL)
2567 		m_freem(top);
2568 	if (control != NULL)
2569 		m_freem(control);
2570 	return (error);
2571 }
2572 
2573 int
2574 sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
2575     struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
2576 {
2577 	int error;
2578 
2579 	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
2580 	if (error)
2581 		return (error);
2582 	error = sosend_generic_locked(so, addr, uio, top, control, flags, td);
2583 	SOCK_IO_SEND_UNLOCK(so);
2584 	return (error);
2585 }
2586 
2587 /*
2588  * Send to a socket from a kernel thread.
2589  *
2590  * XXXGL: in almost all cases uio is NULL and the mbuf is supplied.
2591  * The exception is nfs/bootp_subr.c.  It is arguable that the VNET context needs
2592  * to be set at all.  This function should just boil down to a static inline
2593  * calling the protocol method.
2594  */
2595 int
2596 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
2597     struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
2598 {
2599 	int error;
2600 
2601 	CURVNET_SET(so->so_vnet);
2602 	error = so->so_proto->pr_sosend(so, addr, uio,
2603 	    top, control, flags, td);
2604 	CURVNET_RESTORE();
2605 	return (error);
2606 }
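
/*
 * Sketch of the kernel-thread usage mentioned above (hypothetical caller):
 * a buffer is wrapped in a uio and handed to sosend():
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct uio auio = {
 *		.uio_iov = &iov,
 *		.uio_iovcnt = 1,
 *		.uio_segflg = UIO_SYSSPACE,
 *		.uio_rw = UIO_WRITE,
 *		.uio_resid = len,
 *		.uio_td = curthread,
 *	};
 *
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, curthread);
 */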
2607 
2608 /*
2609  * send(2), write(2) or aio_write(2) on a socket.
2610  */
2611 int
2612 sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio,
2613     struct mbuf *control, int flags, struct proc *userproc)
2614 {
2615 	struct thread *td;
2616 	ssize_t len;
2617 	int error;
2618 
2619 	td = uio->uio_td;
2620 	len = uio->uio_resid;
2621 	CURVNET_SET(so->so_vnet);
2622 	error = so->so_proto->pr_sosend(so, addr, uio, NULL, control, flags,
2623 	    td);
2624 	CURVNET_RESTORE();
2625 	if (error != 0) {
2626 		/*
2627 		 * Clear transient errors for stream protocols if they made
2628 		 * some progress.  Make an exception for aio(4), which would
2629 		 * schedule a new write in case of EWOULDBLOCK and clear the
2630 		 * error itself.  See soaio_process_job().
2631 		 */
2632 		if (uio->uio_resid != len &&
2633 		    (so->so_proto->pr_flags & PR_ATOMIC) == 0 &&
2634 		    userproc == NULL &&
2635 		    (error == ERESTART || error == EINTR ||
2636 		    error == EWOULDBLOCK))
2637 			error = 0;
2638 		/* Generation of SIGPIPE can be controlled per socket. */
2639 		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0 &&
2640 		    (flags & MSG_NOSIGNAL) == 0) {
2641 			if (userproc != NULL) {
2642 				/* aio(4) job */
2643 				PROC_LOCK(userproc);
2644 				kern_psignal(userproc, SIGPIPE);
2645 				PROC_UNLOCK(userproc);
2646 			} else {
2647 				PROC_LOCK(td->td_proc);
2648 				tdsignal(td, SIGPIPE);
2649 				PROC_UNLOCK(td->td_proc);
2650 			}
2651 		}
2652 	}
2653 	return (error);
2654 }
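
/*
 * Userspace sketch: either of the following suppresses the SIGPIPE
 * delivered on the EPIPE path above, per socket or per call respectively:
 *
 *	int on = 1;
 *
 *	(void)setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
 *	n = send(s, buf, len, MSG_NOSIGNAL);
 */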
2655 
2656 /*
2657  * The part of soreceive() that implements reading non-inline out-of-band
2658  * data from a socket.  For more complete comments, see soreceive(), from
2659  * which this code originated.
2660  *
2661  * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
2662  * unable to return an mbuf chain to the caller.
2663  */
2664 static int
2665 soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
2666 {
2667 	struct protosw *pr = so->so_proto;
2668 	struct mbuf *m;
2669 	int error;
2670 
2671 	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
2672 	VNET_SO_ASSERT(so);
2673 
2674 	m = m_get(M_WAITOK, MT_DATA);
2675 	error = pr->pr_rcvoob(so, m, flags & MSG_PEEK);
2676 	if (error)
2677 		goto bad;
2678 	do {
2679 		error = uiomove(mtod(m, void *),
2680 		    (int) min(uio->uio_resid, m->m_len), uio);
2681 		m = m_free(m);
2682 	} while (uio->uio_resid && error == 0 && m);
2683 bad:
2684 	if (m != NULL)
2685 		m_freem(m);
2686 	return (error);
2687 }
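
/*
 * Userspace sketch: non-inline out-of-band data is read through this path
 * by passing MSG_OOB, independently of the normal data stream:
 *
 *	char c;
 *
 *	if (recv(s, &c, 1, MSG_OOB) == 1)
 *		... (got the single byte of TCP urgent data)
 */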
2688 
2689 /*
2690  * Following replacement or removal of the first mbuf on the first mbuf chain
2691  * of a socket buffer, push necessary state changes back into the socket
2692  * buffer so that other consumers see the values consistently.  'nextrecord'
2693  * is the caller's locally stored value of the original value of
2694  * sb->sb_mb->m_nextpkt, which must be restored when the lead mbuf changes.
2695  * NOTE: 'nextrecord' may be NULL.
2696  */
2697 static __inline void
2698 sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
2699 {
2700 
2701 	SOCKBUF_LOCK_ASSERT(sb);
2702 	/*
2703 	 * First, update for the new value of nextrecord.  If necessary, make
2704 	 * it the first record.
2705 	 */
2706 	if (sb->sb_mb != NULL)
2707 		sb->sb_mb->m_nextpkt = nextrecord;
2708 	else
2709 		sb->sb_mb = nextrecord;
2710 
2711 	/*
2712 	 * Now update any dependent socket buffer fields to reflect the new
2713 	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
2714 	 * addition of a second clause that takes care of the case where
2715 	 * sb_mb has been updated, but remains the last record.
2716 	 */
2717 	if (sb->sb_mb == NULL) {
2718 		sb->sb_mbtail = NULL;
2719 		sb->sb_lastrecord = NULL;
2720 	} else if (sb->sb_mb->m_nextpkt == NULL)
2721 		sb->sb_lastrecord = sb->sb_mb;
2722 }
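
/*
 * Worked example (sketch): suppose the buffer holds records R1 and R2 and
 * the caller cached nextrecord = R2 before freeing R1's lead mbuf.  If R1
 * still has mbufs, the new lead mbuf's m_nextpkt is re-pointed at R2; if
 * R1 is now empty, R2 becomes sb_mb.  The second step then clears
 * sb_mbtail and sb_lastrecord if the buffer emptied, or re-points
 * sb_lastrecord at the first record when it is also the last one.
 */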
2723 
2724 /*
2725  * Implement receive operations on a socket.  We depend on the way that
2726  * records are added to the sockbuf by sbappend.  In particular, each record
2727  * (mbufs linked through m_next) must begin with an address if the protocol
2728  * so specifies, followed by an optional mbuf or mbufs containing ancillary
2729  * data, and then zero or more mbufs of data.  In order to allow parallelism
2730  * between network receive and copying to user space, as well as avoid
2731  * sleeping with a mutex held, we release the socket buffer mutex during the
2732  * user space copy.  Although the sockbuf is locked, new data may still be
2733  * appended, and thus we must maintain consistency of the sockbuf during that
2734  * time.
2735  *
2736  * The caller may receive the data as a single mbuf chain by supplying an
2737  * mbuf **mp0 for use in returning the chain.  The uio is then used only for
2738  * the count in uio_resid.
2739  */
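
/*
 * Sketch of the record layout consumed below: within a record mbufs are
 * linked through m_next, and records are linked through the m_nextpkt of
 * each record's first mbuf:
 *
 *	sb_mb -> MT_SONAME -> MT_CONTROL -> MT_DATA -> MT_DATA
 *	             |
 *	         m_nextpkt
 *	             |
 *	             v
 *	         next record
 *
 * The MT_SONAME and MT_CONTROL mbufs are optional, as described above.
 */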
2740 static int
2741 soreceive_generic_locked(struct socket *so, struct sockaddr **psa,
2742     struct uio *uio, struct mbuf **mp, struct mbuf **controlp, int *flagsp)
2743 {
2744 	struct mbuf *m;
2745 	int flags, error, offset;
2746 	ssize_t len;
2747 	struct protosw *pr = so->so_proto;
2748 	struct mbuf *nextrecord;
2749 	int moff, type = 0;
2750 	ssize_t orig_resid = uio->uio_resid;
2751 	bool report_real_len = false;
2752 
2753 	SOCK_IO_RECV_ASSERT_LOCKED(so);
2754 
2755 	error = 0;
2756 	if (flagsp != NULL) {
2757 		report_real_len = *flagsp & MSG_TRUNC;
2758 		*flagsp &= ~MSG_TRUNC;
2759 		flags = *flagsp &~ MSG_EOR;
2760 	} else
2761 		flags = 0;
2762 
2763 restart:
2764 	SOCKBUF_LOCK(&so->so_rcv);
2765 	m = so->so_rcv.sb_mb;
2766 	/*
2767 	 * If we have less data than requested, block awaiting more (subject
2768 	 * to any timeout) if:
2769 	 *   1. the current count is less than the low water mark, or
2770 	 *   2. MSG_DONTWAIT is not set
2771 	 */
2772 	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
2773 	    sbavail(&so->so_rcv) < uio->uio_resid) &&
2774 	    sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
2775 	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
2776 		KASSERT(m != NULL || !sbavail(&so->so_rcv),
2777 		    ("receive: m == %p sbavail == %u",
2778 		    m, sbavail(&so->so_rcv)));
2779 		if (so->so_error || so->so_rerror) {
2780 			if (m != NULL)
2781 				goto dontblock;
2782 			if (so->so_error)
2783 				error = so->so_error;
2784 			else
2785 				error = so->so_rerror;
2786 			if ((flags & MSG_PEEK) == 0) {
2787 				if (so->so_error)
2788 					so->so_error = 0;
2789 				else
2790 					so->so_rerror = 0;
2791 			}
2792 			SOCKBUF_UNLOCK(&so->so_rcv);
2793 			goto release;
2794 		}
2795 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2796 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2797 			if (m != NULL)
2798 				goto dontblock;
2799 #ifdef KERN_TLS
2800 			else if (so->so_rcv.sb_tlsdcc == 0 &&
2801 			    so->so_rcv.sb_tlscc == 0) {
2802 #else
2803 			else {
2804 #endif
2805 				SOCKBUF_UNLOCK(&so->so_rcv);
2806 				goto release;
2807 			}
2808 		}
2809 		for (; m != NULL; m = m->m_next)
2810 			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
2811 				m = so->so_rcv.sb_mb;
2812 				goto dontblock;
2813 			}
2814 		if ((so->so_state & (SS_ISCONNECTING | SS_ISCONNECTED |
2815 		    SS_ISDISCONNECTING | SS_ISDISCONNECTED)) == 0 &&
2816 		    (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
2817 			SOCKBUF_UNLOCK(&so->so_rcv);
2818 			error = ENOTCONN;
2819 			goto release;
2820 		}
2821 		if (uio->uio_resid == 0 && !report_real_len) {
2822 			SOCKBUF_UNLOCK(&so->so_rcv);
2823 			goto release;
2824 		}
2825 		if ((so->so_state & SS_NBIO) ||
2826 		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2827 			SOCKBUF_UNLOCK(&so->so_rcv);
2828 			error = EWOULDBLOCK;
2829 			goto release;
2830 		}
2831 		SBLASTRECORDCHK(&so->so_rcv);
2832 		SBLASTMBUFCHK(&so->so_rcv);
2833 		error = sbwait(so, SO_RCV);
2834 		SOCKBUF_UNLOCK(&so->so_rcv);
2835 		if (error)
2836 			goto release;
2837 		goto restart;
2838 	}
2839 dontblock:
2840 	/*
2841 	 * From this point onward, we maintain 'nextrecord' as a cache of the
2842 	 * pointer to the next record in the socket buffer.  We must keep the
2843 	 * various socket buffer pointers and local stack versions of the
2844 	 * pointers in sync, pushing out modifications before dropping the
2845 	 * socket buffer mutex, and re-reading them when picking it up.
2846 	 *
2847 	 * Otherwise, we will race with the network stack appending new data
2848 	 * or records onto the socket buffer by using inconsistent/stale
2849 	 * versions of the field, possibly resulting in socket buffer
2850 	 * corruption.
2851 	 *
2852 	 * By holding the high-level sblock(), we prevent simultaneous
2853 	 * readers from pulling off the front of the socket buffer.
2854 	 */
2855 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2856 	if (uio->uio_td)
2857 		uio->uio_td->td_ru.ru_msgrcv++;
2858 	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
2859 	SBLASTRECORDCHK(&so->so_rcv);
2860 	SBLASTMBUFCHK(&so->so_rcv);
2861 	nextrecord = m->m_nextpkt;
2862 	if (pr->pr_flags & PR_ADDR) {
2863 		KASSERT(m->m_type == MT_SONAME,
2864 		    ("m->m_type == %d", m->m_type));
2865 		orig_resid = 0;
2866 		if (psa != NULL)
2867 			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
2868 			    M_NOWAIT);
2869 		if (flags & MSG_PEEK) {
2870 			m = m->m_next;
2871 		} else {
2872 			sbfree(&so->so_rcv, m);
2873 			so->so_rcv.sb_mb = m_free(m);
2874 			m = so->so_rcv.sb_mb;
2875 			sockbuf_pushsync(&so->so_rcv, nextrecord);
2876 		}
2877 	}
2878 
2879 	/*
2880 	 * Process one or more MT_CONTROL mbufs present before any data mbufs
2881 	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
2882 	 * just copy the data; if !MSG_PEEK, we call into the protocol to
2883 	 * perform externalization (or freeing if controlp == NULL).
2884 	 */
2885 	if (m != NULL && m->m_type == MT_CONTROL) {
2886 		struct mbuf *cm = NULL, *cmn;
2887 		struct mbuf **cme = &cm;
2888 #ifdef KERN_TLS
2889 		struct cmsghdr *cmsg;
2890 		struct tls_get_record tgr;
2891 
2892 		/*
2893 		 * For MSG_TLSAPPDATA, check for an alert record.
2894 		 * If found, return ENXIO without removing
2895 		 * it from the receive queue.  This allows a subsequent
2896 		 * call without MSG_TLSAPPDATA to receive it.
2897 		 * Note that, for TLS, there should only be a single
2898 		 * control mbuf with the TLS_GET_RECORD message in it.
2899 		 */
2900 		if (flags & MSG_TLSAPPDATA) {
2901 			cmsg = mtod(m, struct cmsghdr *);
2902 			if (cmsg->cmsg_type == TLS_GET_RECORD &&
2903 			    cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
2904 				memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
2905 				if (__predict_false(tgr.tls_type ==
2906 				    TLS_RLTYPE_ALERT)) {
2907 					SOCKBUF_UNLOCK(&so->so_rcv);
2908 					error = ENXIO;
2909 					goto release;
2910 				}
2911 			}
2912 		}
2913 #endif
2914 
2915 		do {
2916 			if (flags & MSG_PEEK) {
2917 				if (controlp != NULL) {
2918 					*controlp = m_copym(m, 0, m->m_len,
2919 					    M_NOWAIT);
2920 					controlp = &(*controlp)->m_next;
2921 				}
2922 				m = m->m_next;
2923 			} else {
2924 				sbfree(&so->so_rcv, m);
2925 				so->so_rcv.sb_mb = m->m_next;
2926 				m->m_next = NULL;
2927 				*cme = m;
2928 				cme = &(*cme)->m_next;
2929 				m = so->so_rcv.sb_mb;
2930 			}
2931 		} while (m != NULL && m->m_type == MT_CONTROL);
2932 		if ((flags & MSG_PEEK) == 0)
2933 			sockbuf_pushsync(&so->so_rcv, nextrecord);
2934 		while (cm != NULL) {
2935 			cmn = cm->m_next;
2936 			cm->m_next = NULL;
2937 			if (pr->pr_domain->dom_externalize != NULL) {
2938 				SOCKBUF_UNLOCK(&so->so_rcv);
2939 				VNET_SO_ASSERT(so);
2940 				error = (*pr->pr_domain->dom_externalize)
2941 				    (cm, controlp, flags);
2942 				SOCKBUF_LOCK(&so->so_rcv);
2943 			} else if (controlp != NULL)
2944 				*controlp = cm;
2945 			else
2946 				m_freem(cm);
2947 			if (controlp != NULL) {
2948 				while (*controlp != NULL)
2949 					controlp = &(*controlp)->m_next;
2950 			}
2951 			cm = cmn;
2952 		}
2953 		if (m != NULL)
2954 			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
2955 		else
2956 			nextrecord = so->so_rcv.sb_mb;
2957 		orig_resid = 0;
2958 	}
2959 	if (m != NULL) {
2960 		if ((flags & MSG_PEEK) == 0) {
2961 			KASSERT(m->m_nextpkt == nextrecord,
2962 			    ("soreceive: post-control, nextrecord !sync"));
2963 			if (nextrecord == NULL) {
2964 				KASSERT(so->so_rcv.sb_mb == m,
2965 				    ("soreceive: post-control, sb_mb!=m"));
2966 				KASSERT(so->so_rcv.sb_lastrecord == m,
2967 				    ("soreceive: post-control, lastrecord!=m"));
2968 			}
2969 		}
2970 		type = m->m_type;
2971 		if (type == MT_OOBDATA)
2972 			flags |= MSG_OOB;
2973 	} else {
2974 		if ((flags & MSG_PEEK) == 0) {
2975 			KASSERT(so->so_rcv.sb_mb == nextrecord,
2976 			    ("soreceive: sb_mb != nextrecord"));
2977 			if (so->so_rcv.sb_mb == NULL) {
2978 				KASSERT(so->so_rcv.sb_lastrecord == NULL,
2979 				    ("soreceive: sb_lastrecord != NULL"));
2980 			}
2981 		}
2982 	}
2983 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2984 	SBLASTRECORDCHK(&so->so_rcv);
2985 	SBLASTMBUFCHK(&so->so_rcv);
2986 
2987 	/*
2988 	 * Now continue to read any data mbufs off of the head of the socket
2989 	 * buffer until the read request is satisfied.  Note that 'type' is
2990 	 * used to store the type of any mbuf reads that have happened so far
2991 	 * such that soreceive() can stop reading if the type changes, which
2992 	 * causes soreceive() to return only one of regular data and inline
2993 	 * out-of-band data in a single socket receive operation.
2994 	 */
2995 	moff = 0;
2996 	offset = 0;
2997 	while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
2998 	    && error == 0) {
2999 		/*
3000 		 * If the type of mbuf has changed since the last mbuf
3001 		 * examined ('type'), end the receive operation.
3002 		 */
3003 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3004 		if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
3005 			if (type != m->m_type)
3006 				break;
3007 		} else if (type == MT_OOBDATA)
3008 			break;
3009 		else
3010 			KASSERT(m->m_type == MT_DATA,
3011 			    ("m->m_type == %d", m->m_type));
3012 		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
3013 		len = uio->uio_resid;
3014 		if (so->so_oobmark && len > so->so_oobmark - offset)
3015 			len = so->so_oobmark - offset;
3016 		if (len > m->m_len - moff)
3017 			len = m->m_len - moff;
3018 		/*
3019 		 * If mp is set, just pass back the mbufs.  Otherwise copy
3020 		 * them out via the uio, then free.  The sockbuf must be
3021 		 * consistent here (sb_mb points to the current mbuf, nextrecord
3022 		 * to the next record) when we drop the sockbuf lock; we must
3023 		 * note any additions made to the sockbuf while it was unlocked.
3024 		 */
3025 		if (mp == NULL) {
3026 			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3027 			SBLASTRECORDCHK(&so->so_rcv);
3028 			SBLASTMBUFCHK(&so->so_rcv);
3029 			SOCKBUF_UNLOCK(&so->so_rcv);
3030 			if ((m->m_flags & M_EXTPG) != 0)
3031 				error = m_unmapped_uiomove(m, moff, uio,
3032 				    (int)len);
3033 			else
3034 				error = uiomove(mtod(m, char *) + moff,
3035 				    (int)len, uio);
3036 			SOCKBUF_LOCK(&so->so_rcv);
3037 			if (error) {
3038 				/*
3039 				 * The MT_SONAME mbuf has already been removed
3040 				 * from the record, so it is necessary to
3041 				 * remove the data mbufs, if any, to preserve
3042 				 * the invariant in the case of PR_ADDR that
3043 				 * requires MT_SONAME mbufs at the head of
3044 				 * each record.
3045 				 */
3046 				if (pr->pr_flags & PR_ATOMIC &&
3047 				    ((flags & MSG_PEEK) == 0))
3048 					(void)sbdroprecord_locked(&so->so_rcv);
3049 				SOCKBUF_UNLOCK(&so->so_rcv);
3050 				goto release;
3051 			}
3052 		} else
3053 			uio->uio_resid -= len;
3054 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3055 		if (len == m->m_len - moff) {
3056 			if (m->m_flags & M_EOR)
3057 				flags |= MSG_EOR;
3058 			if (flags & MSG_PEEK) {
3059 				m = m->m_next;
3060 				moff = 0;
3061 			} else {
3062 				nextrecord = m->m_nextpkt;
3063 				sbfree(&so->so_rcv, m);
3064 				if (mp != NULL) {
3065 					m->m_nextpkt = NULL;
3066 					*mp = m;
3067 					mp = &m->m_next;
3068 					so->so_rcv.sb_mb = m = m->m_next;
3069 					*mp = NULL;
3070 				} else {
3071 					so->so_rcv.sb_mb = m_free(m);
3072 					m = so->so_rcv.sb_mb;
3073 				}
3074 				sockbuf_pushsync(&so->so_rcv, nextrecord);
3075 				SBLASTRECORDCHK(&so->so_rcv);
3076 				SBLASTMBUFCHK(&so->so_rcv);
3077 			}
3078 		} else {
3079 			if (flags & MSG_PEEK)
3080 				moff += len;
3081 			else {
3082 				if (mp != NULL) {
3083 					if (flags & MSG_DONTWAIT) {
3084 						*mp = m_copym(m, 0, len,
3085 						    M_NOWAIT);
3086 						if (*mp == NULL) {
3087 							/*
3088 							 * m_copym() couldn't
3089 							 * allocate an mbuf.
3090 							 * Adjust uio_resid back
3091 							 * (it was adjusted
3092 							 * down by len bytes,
3093 							 * which we didn't end
3094 							 * up "copying" over).
3095 							 */
3096 							uio->uio_resid += len;
3097 							break;
3098 						}
3099 					} else {
3100 						SOCKBUF_UNLOCK(&so->so_rcv);
3101 						*mp = m_copym(m, 0, len,
3102 						    M_WAITOK);
3103 						SOCKBUF_LOCK(&so->so_rcv);
3104 					}
3105 				}
3106 				sbcut_locked(&so->so_rcv, len);
3107 			}
3108 		}
3109 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3110 		if (so->so_oobmark) {
3111 			if ((flags & MSG_PEEK) == 0) {
3112 				so->so_oobmark -= len;
3113 				if (so->so_oobmark == 0) {
3114 					so->so_rcv.sb_state |= SBS_RCVATMARK;
3115 					break;
3116 				}
3117 			} else {
3118 				offset += len;
3119 				if (offset == so->so_oobmark)
3120 					break;
3121 			}
3122 		}
3123 		if (flags & MSG_EOR)
3124 			break;
3125 		/*
3126 		 * If the MSG_WAITALL flag is set (for a non-atomic socket), we
3127 		 * must not quit until "uio->uio_resid == 0" or an error
3128 		 * termination.  If a signal/timeout occurs, return with a
3129 		 * short count but without error.  Keep sockbuf locked
3130 		 * against other readers.
3131 		 */
3132 		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
3133 		    !sosendallatonce(so) && nextrecord == NULL) {
3134 			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3135 			if (so->so_error || so->so_rerror ||
3136 			    so->so_rcv.sb_state & SBS_CANTRCVMORE)
3137 				break;
3138 			/*
3139 			 * Notify the protocol that some data has been
3140 			 * drained before blocking.
3141 			 */
3142 			if (pr->pr_flags & PR_WANTRCVD) {
3143 				SOCKBUF_UNLOCK(&so->so_rcv);
3144 				VNET_SO_ASSERT(so);
3145 				pr->pr_rcvd(so, flags);
3146 				SOCKBUF_LOCK(&so->so_rcv);
3147 				if (__predict_false(so->so_rcv.sb_mb == NULL &&
3148 				    (so->so_error || so->so_rerror ||
3149 				    so->so_rcv.sb_state & SBS_CANTRCVMORE)))
3150 					break;
3151 			}
3152 			SBLASTRECORDCHK(&so->so_rcv);
3153 			SBLASTMBUFCHK(&so->so_rcv);
3154 			/*
3155 			 * We could have received some data while we were notifying
3156 			 * the protocol. Skip blocking in this case.
3157 			 */
3158 			if (so->so_rcv.sb_mb == NULL) {
3159 				error = sbwait(so, SO_RCV);
3160 				if (error) {
3161 					SOCKBUF_UNLOCK(&so->so_rcv);
3162 					goto release;
3163 				}
3164 			}
3165 			m = so->so_rcv.sb_mb;
3166 			if (m != NULL)
3167 				nextrecord = m->m_nextpkt;
3168 		}
3169 	}
3170 
3171 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3172 	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
3173 		if (report_real_len)
3174 			uio->uio_resid -= m_length(m, NULL) - moff;
3175 		flags |= MSG_TRUNC;
3176 		if ((flags & MSG_PEEK) == 0)
3177 			(void) sbdroprecord_locked(&so->so_rcv);
3178 	}
3179 	if ((flags & MSG_PEEK) == 0) {
3180 		if (m == NULL) {
3181 			/*
3182 			 * First part is an inline SB_EMPTY_FIXUP().  Second
3183 			 * part makes sure sb_lastrecord is up-to-date if
3184 			 * there is still data in the socket buffer.
3185 			 */
3186 			so->so_rcv.sb_mb = nextrecord;
3187 			if (so->so_rcv.sb_mb == NULL) {
3188 				so->so_rcv.sb_mbtail = NULL;
3189 				so->so_rcv.sb_lastrecord = NULL;
3190 			} else if (nextrecord->m_nextpkt == NULL)
3191 				so->so_rcv.sb_lastrecord = nextrecord;
3192 		}
3193 		SBLASTRECORDCHK(&so->so_rcv);
3194 		SBLASTMBUFCHK(&so->so_rcv);
3195 		/*
3196 		 * If soreceive() is being done from the socket callback,
3197 		 * then we don't need to generate an ACK to the peer to update
3198 		 * the window, since an ACK will be generated on return to TCP.
3199 		 */
3200 		if (!(flags & MSG_SOCALLBCK) &&
3201 		    (pr->pr_flags & PR_WANTRCVD)) {
3202 			SOCKBUF_UNLOCK(&so->so_rcv);
3203 			VNET_SO_ASSERT(so);
3204 			pr->pr_rcvd(so, flags);
3205 			SOCKBUF_LOCK(&so->so_rcv);
3206 		}
3207 	}
3208 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3209 	if (orig_resid == uio->uio_resid && orig_resid &&
3210 	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
3211 		SOCKBUF_UNLOCK(&so->so_rcv);
3212 		goto restart;
3213 	}
3214 	SOCKBUF_UNLOCK(&so->so_rcv);
3215 
3216 	if (flagsp != NULL)
3217 		*flagsp |= flags;
3218 release:
3219 	return (error);
3220 }
3221 
3222 int
3223 soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
3224     struct mbuf **mp, struct mbuf **controlp, int *flagsp)
3225 {
3226 	int error, flags;
3227 
3228 	if (psa != NULL)
3229 		*psa = NULL;
3230 	if (controlp != NULL)
3231 		*controlp = NULL;
3232 	if (flagsp != NULL) {
3233 		flags = *flagsp;
3234 		if ((flags & MSG_OOB) != 0)
3235 			return (soreceive_rcvoob(so, uio, flags));
3236 	} else {
3237 		flags = 0;
3238 	}
3239 	if (mp != NULL)
3240 		*mp = NULL;
3241 
3242 	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
3243 	if (error)
3244 		return (error);
3245 	error = soreceive_generic_locked(so, psa, uio, mp, controlp, flagsp);
3246 	SOCK_IO_RECV_UNLOCK(so);
3247 	return (error);
3248 }
3249 
3250 /*
3251  * Optimized version of soreceive() for stream (TCP) sockets.
3252  */
3253 static int
3254 soreceive_stream_locked(struct socket *so, struct sockbuf *sb,
3255     struct sockaddr **psa, struct uio *uio, struct mbuf **mp0,
3256     struct mbuf **controlp, int flags)
3257 {
3258 	int len = 0, error = 0, oresid;
3259 	struct mbuf *m, *n = NULL;
3260 
3261 	SOCK_IO_RECV_ASSERT_LOCKED(so);
3262 
3263 	/* Easy one, no space to copyout anything. */
3264 	if (uio->uio_resid == 0)
3265 		return (EINVAL);
3266 	oresid = uio->uio_resid;
3267 
3268 	SOCKBUF_LOCK(sb);
3269 	/* We will never ever get anything unless we are or were connected. */
3270 	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
3271 		error = ENOTCONN;
3272 		goto out;
3273 	}
3274 
3275 restart:
3276 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3277 
3278 	/* Abort if socket has reported problems. */
3279 	if (so->so_error) {
3280 		if (sbavail(sb) > 0)
3281 			goto deliver;
3282 		if (oresid > uio->uio_resid)
3283 			goto out;
3284 		error = so->so_error;
3285 		if (!(flags & MSG_PEEK))
3286 			so->so_error = 0;
3287 		goto out;
3288 	}
3289 
3290 	/* Door is closed.  Deliver what is left, if any. */
3291 	if (sb->sb_state & SBS_CANTRCVMORE) {
3292 		if (sbavail(sb) > 0)
3293 			goto deliver;
3294 		else
3295 			goto out;
3296 	}
3297 
3298 	/* Socket buffer is empty and we shall not block. */
3299 	if (sbavail(sb) == 0 &&
3300 	    ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
3301 		error = EAGAIN;
3302 		goto out;
3303 	}
3304 
3305 	/* Socket buffer got some data that we shall deliver now. */
3306 	if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
3307 	    ((so->so_state & SS_NBIO) ||
3308 	     (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
3309 	     sbavail(sb) >= sb->sb_lowat ||
3310 	     sbavail(sb) >= uio->uio_resid ||
3311 	     sbavail(sb) >= sb->sb_hiwat)) {
3312 		goto deliver;
3313 	}
3314 
3315 	/* On MSG_WAITALL we must wait until all data or error arrives. */
3316 	if ((flags & MSG_WAITALL) &&
3317 	    (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
3318 		goto deliver;
3319 
3320 	/*
3321 	 * Wait and block until (more) data comes in.
3322 	 * NB: Drops the sockbuf lock during wait.
3323 	 */
3324 	error = sbwait(so, SO_RCV);
3325 	if (error)
3326 		goto out;
3327 	goto restart;
3328 
3329 deliver:
3330 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3331 	KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
3332 	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
3333 
3334 	/* Statistics. */
3335 	if (uio->uio_td)
3336 		uio->uio_td->td_ru.ru_msgrcv++;
3337 
3338 	/* Fill uio until full or current end of socket buffer is reached. */
3339 	len = min(uio->uio_resid, sbavail(sb));
3340 	if (mp0 != NULL) {
3341 		/* Dequeue as many mbufs as possible. */
3342 		if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
3343 			if (*mp0 == NULL)
3344 				*mp0 = sb->sb_mb;
3345 			else
3346 				m_cat(*mp0, sb->sb_mb);
3347 			for (m = sb->sb_mb;
3348 			     m != NULL && m->m_len <= len;
3349 			     m = m->m_next) {
3350 				KASSERT(!(m->m_flags & M_NOTAVAIL),
3351 				    ("%s: m %p not available", __func__, m));
3352 				len -= m->m_len;
3353 				uio->uio_resid -= m->m_len;
3354 				sbfree(sb, m);
3355 				n = m;
3356 			}
3357 			n->m_next = NULL;
3358 			sb->sb_mb = m;
3359 			sb->sb_lastrecord = sb->sb_mb;
3360 			if (sb->sb_mb == NULL)
3361 				SB_EMPTY_FIXUP(sb);
3362 		}
3363 		/* Copy the remainder. */
3364 		if (len > 0) {
3365 			KASSERT(sb->sb_mb != NULL,
3366 			    ("%s: len > 0 && sb->sb_mb empty", __func__));
3367 
3368 			m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
3369 			if (m == NULL)
3370 				len = 0;	/* Don't flush data from sockbuf. */
3371 			else
3372 				uio->uio_resid -= len;
3373 			if (*mp0 != NULL)
3374 				m_cat(*mp0, m);
3375 			else
3376 				*mp0 = m;
3377 			if (*mp0 == NULL) {
3378 				error = ENOBUFS;
3379 				goto out;
3380 			}
3381 		}
3382 	} else {
3383 		/* NB: Must unlock socket buffer as uiomove may sleep. */
3384 		SOCKBUF_UNLOCK(sb);
3385 		error = m_mbuftouio(uio, sb->sb_mb, len);
3386 		SOCKBUF_LOCK(sb);
3387 		if (error)
3388 			goto out;
3389 	}
3390 	SBLASTRECORDCHK(sb);
3391 	SBLASTMBUFCHK(sb);
3392 
3393 	/*
3394 	 * Remove the delivered data from the socket buffer unless we
3395 	 * were only peeking.
3396 	 */
3397 	if (!(flags & MSG_PEEK)) {
3398 		if (len > 0)
3399 			sbdrop_locked(sb, len);
3400 
3401 		/* Notify protocol that we drained some data. */
3402 		if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
3403 		    (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
3404 		     !(flags & MSG_SOCALLBCK))) {
3405 			SOCKBUF_UNLOCK(sb);
3406 			VNET_SO_ASSERT(so);
3407 			so->so_proto->pr_rcvd(so, flags);
3408 			SOCKBUF_LOCK(sb);
3409 		}
3410 	}
3411 
3412 	/*
3413 	 * For MSG_WAITALL we may have to loop again and wait for
3414 	 * more data to come in.
3415 	 */
3416 	if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
3417 		goto restart;
3418 out:
3419 	SBLASTRECORDCHK(sb);
3420 	SBLASTMBUFCHK(sb);
3421 	SOCKBUF_UNLOCK(sb);
3422 	return (error);
3423 }
3424 
3425 int
3426 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
3427     struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3428 {
3429 	struct sockbuf *sb;
3430 	int error, flags;
3431 
3432 	sb = &so->so_rcv;
3433 
3434 	/* We only do stream sockets. */
3435 	if (so->so_type != SOCK_STREAM)
3436 		return (EINVAL);
3437 	if (psa != NULL)
3438 		*psa = NULL;
3439 	if (flagsp != NULL)
3440 		flags = *flagsp & ~MSG_EOR;
3441 	else
3442 		flags = 0;
3443 	if (controlp != NULL)
3444 		*controlp = NULL;
3445 	if (flags & MSG_OOB)
3446 		return (soreceive_rcvoob(so, uio, flags));
3447 	if (mp0 != NULL)
3448 		*mp0 = NULL;
3449 
3450 #ifdef KERN_TLS
3451 	/*
3452 	 * KTLS stores TLS records as records with a control message to
3453 	 * describe the framing.
3454 	 *
3455 	 * We check once here before acquiring locks to optimize the
3456 	 * common case.
3457 	 */
3458 	if (sb->sb_tls_info != NULL)
3459 		return (soreceive_generic(so, psa, uio, mp0, controlp,
3460 		    flagsp));
3461 #endif
3462 
3463 	/*
3464 	 * Prevent other threads from reading from the socket.  This lock may be
3465 	 * dropped in order to sleep waiting for data to arrive.
3466 	 */
3467 	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
3468 	if (error)
3469 		return (error);
3470 #ifdef KERN_TLS
3471 	if (__predict_false(sb->sb_tls_info != NULL)) {
3472 		SOCK_IO_RECV_UNLOCK(so);
3473 		return (soreceive_generic(so, psa, uio, mp0, controlp,
3474 		    flagsp));
3475 	}
3476 #endif
3477 	error = soreceive_stream_locked(so, sb, psa, uio, mp0, controlp, flags);
3478 	SOCK_IO_RECV_UNLOCK(so);
3479 	return (error);
3480 }
3481 
3482 /*
3483  * Optimized version of soreceive() for simple datagram cases from userspace.
3484  * Unlike in the stream case, we're able to drop a datagram if copyout()
3485  * fails, and because we handle datagrams atomically, we don't need to use a
3486  * sleep lock to prevent I/O interlacing.
3487  */
3488 int
3489 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
3490     struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3491 {
3492 	struct mbuf *m, *m2;
3493 	int flags, error;
3494 	ssize_t len;
3495 	struct protosw *pr = so->so_proto;
3496 	struct mbuf *nextrecord;
3497 
3498 	if (psa != NULL)
3499 		*psa = NULL;
3500 	if (controlp != NULL)
3501 		*controlp = NULL;
3502 	if (flagsp != NULL)
3503 		flags = *flagsp &~ MSG_EOR;
3504 	else
3505 		flags = 0;
3506 
3507 	/*
3508 	 * For any complicated cases, fall back to the full
3509 	 * soreceive_generic().
3510 	 */
3511 	if (mp0 != NULL || (flags & (MSG_PEEK | MSG_OOB | MSG_TRUNC)))
3512 		return (soreceive_generic(so, psa, uio, mp0, controlp,
3513 		    flagsp));
3514 
3515 	/*
3516 	 * Enforce restrictions on use.
3517 	 */
3518 	KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
3519 	    ("soreceive_dgram: wantrcvd"));
3520 	KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
3521 	KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
3522 	    ("soreceive_dgram: SBS_RCVATMARK"));
3523 	KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
3524 	    ("soreceive_dgram: PR_CONNREQUIRED"));
3525 
3526 	/*
3527 	 * Loop blocking while waiting for a datagram.
3528 	 */
3529 	SOCKBUF_LOCK(&so->so_rcv);
3530 	while ((m = so->so_rcv.sb_mb) == NULL) {
3531 		KASSERT(sbavail(&so->so_rcv) == 0,
3532 		    ("soreceive_dgram: sb_mb NULL but sbavail %u",
3533 		    sbavail(&so->so_rcv)));
3534 		if (so->so_error) {
3535 			error = so->so_error;
3536 			so->so_error = 0;
3537 			SOCKBUF_UNLOCK(&so->so_rcv);
3538 			return (error);
3539 		}
3540 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
3541 		    uio->uio_resid == 0) {
3542 			SOCKBUF_UNLOCK(&so->so_rcv);
3543 			return (0);
3544 		}
3545 		if ((so->so_state & SS_NBIO) ||
3546 		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
3547 			SOCKBUF_UNLOCK(&so->so_rcv);
3548 			return (EWOULDBLOCK);
3549 		}
3550 		SBLASTRECORDCHK(&so->so_rcv);
3551 		SBLASTMBUFCHK(&so->so_rcv);
3552 		error = sbwait(so, SO_RCV);
3553 		if (error) {
3554 			SOCKBUF_UNLOCK(&so->so_rcv);
3555 			return (error);
3556 		}
3557 	}
3558 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3559 
3560 	if (uio->uio_td)
3561 		uio->uio_td->td_ru.ru_msgrcv++;
3562 	SBLASTRECORDCHK(&so->so_rcv);
3563 	SBLASTMBUFCHK(&so->so_rcv);
3564 	nextrecord = m->m_nextpkt;
3565 	if (nextrecord == NULL) {
3566 		KASSERT(so->so_rcv.sb_lastrecord == m,
3567 		    ("soreceive_dgram: lastrecord != m"));
3568 	}
3569 
3570 	KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
3571 	    ("soreceive_dgram: m_nextpkt != nextrecord"));
3572 
3573 	/*
3574 	 * Pull 'm' and its chain off the front of the packet queue.
3575 	 */
3576 	so->so_rcv.sb_mb = NULL;
3577 	sockbuf_pushsync(&so->so_rcv, nextrecord);
3578 
3579 	/*
3580 	 * Walk 'm's chain and free that many bytes from the socket buffer.
3581 	 */
3582 	for (m2 = m; m2 != NULL; m2 = m2->m_next)
3583 		sbfree(&so->so_rcv, m2);
3584 
3585 	/*
3586 	 * Do a few last checks before we let go of the lock.
3587 	 */
3588 	SBLASTRECORDCHK(&so->so_rcv);
3589 	SBLASTMBUFCHK(&so->so_rcv);
3590 	SOCKBUF_UNLOCK(&so->so_rcv);
3591 
3592 	if (pr->pr_flags & PR_ADDR) {
3593 		KASSERT(m->m_type == MT_SONAME,
3594 		    ("m->m_type == %d", m->m_type));
3595 		if (psa != NULL)
3596 			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
3597 			    M_WAITOK);
3598 		m = m_free(m);
3599 	}
3600 	KASSERT(m, ("%s: no data or control after soname", __func__));
3601 
3602 	/*
3603 	 * Packet to copyout() is now in 'm' and it is disconnected from the
3604 	 * queue.
3605 	 *
3606 	 * Process one or more MT_CONTROL mbufs present before any data mbufs
3607 	 * in the first mbuf chain on the socket buffer.  We call into the
3608 	 * protocol to perform externalization (or freeing if controlp ==
3609 	 * NULL). In some cases there can be only MT_CONTROL mbufs without
3610 	 * MT_DATA mbufs.
3611 	 */
3612 	if (m->m_type == MT_CONTROL) {
3613 		struct mbuf *cm = NULL, *cmn;
3614 		struct mbuf **cme = &cm;
3615 
3616 		do {
3617 			m2 = m->m_next;
3618 			m->m_next = NULL;
3619 			*cme = m;
3620 			cme = &(*cme)->m_next;
3621 			m = m2;
3622 		} while (m != NULL && m->m_type == MT_CONTROL);
3623 		while (cm != NULL) {
3624 			cmn = cm->m_next;
3625 			cm->m_next = NULL;
3626 			if (pr->pr_domain->dom_externalize != NULL) {
3627 				error = (*pr->pr_domain->dom_externalize)
3628 				    (cm, controlp, flags);
3629 			} else if (controlp != NULL)
3630 				*controlp = cm;
3631 			else
3632 				m_freem(cm);
3633 			if (controlp != NULL) {
3634 				while (*controlp != NULL)
3635 					controlp = &(*controlp)->m_next;
3636 			}
3637 			cm = cmn;
3638 		}
3639 	}
3640 	KASSERT(m == NULL || m->m_type == MT_DATA,
3641 	    ("soreceive_dgram: !data"));
3642 	while (m != NULL && uio->uio_resid > 0) {
3643 		len = uio->uio_resid;
3644 		if (len > m->m_len)
3645 			len = m->m_len;
3646 		error = uiomove(mtod(m, char *), (int)len, uio);
3647 		if (error) {
3648 			m_freem(m);
3649 			return (error);
3650 		}
3651 		if (len == m->m_len)
3652 			m = m_free(m);
3653 		else {
3654 			m->m_data += len;
3655 			m->m_len -= len;
3656 		}
3657 	}
3658 	if (m != NULL) {
3659 		flags |= MSG_TRUNC;
3660 		m_freem(m);
3661 	}
3662 	if (flagsp != NULL)
3663 		*flagsp |= flags;
3664 	return (0);
3665 }
3666 
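/*
 * Illustrative sketch, not part of the original source: how a kernel
 * consumer might read a single datagram via soreceive(), which
 * dispatches to soreceive_dgram() above for datagram protocols.  The
 * function name "example_recv_dgram" is hypothetical.
 */
static int
example_recv_dgram(struct socket *so, void *buf, size_t buflen, size_t *lenp)
{
	struct uio uio;
	struct iovec iov;
	struct sockaddr *from = NULL;
	int error, flags;

	iov.iov_base = buf;
	iov.iov_len = buflen;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = buflen;
	uio.uio_segflg = UIO_SYSSPACE;	/* copy into kernel memory */
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	flags = MSG_DONTWAIT;		/* return EWOULDBLOCK if no datagram */
	error = soreceive(so, &from, &uio, NULL, NULL, &flags);
	if (error == 0) {
		*lenp = buflen - uio.uio_resid;
		if (from != NULL)
			free(from, M_SONAME);	/* from sodupsockaddr() */
	}
	return (error);
}
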
3667 int
3668 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
3669     struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3670 {
3671 	int error;
3672 
3673 	CURVNET_SET(so->so_vnet);
3674 	error = so->so_proto->pr_soreceive(so, psa, uio, mp0, controlp, flagsp);
3675 	CURVNET_RESTORE();
3676 	return (error);
3677 }
3678 
3679 int
3680 soshutdown(struct socket *so, enum shutdown_how how)
3681 {
3682 	int error;
3683 
3684 	CURVNET_SET(so->so_vnet);
3685 	error = so->so_proto->pr_shutdown(so, how);
3686 	CURVNET_RESTORE();
3687 
3688 	return (error);
3689 }
3690 
3691 /*
3692  * Used by several pr_shutdown implementations that use generic socket buffers.
3693  */
3694 void
3695 sorflush(struct socket *so)
3696 {
3697 	int error;
3698 
3699 	VNET_SO_ASSERT(so);
3700 
3701 	/*
3702 	 * Dislodge threads currently blocked in receive and wait to acquire
3703 	 * a lock against other simultaneous readers before clearing the
3704 	 * socket buffer.  Don't let our acquire be interrupted by a signal
3705 	 * despite any existing socket disposition on interruptible waiting.
3706 	 *
3707 	 * The SOCK_IO_RECV_LOCK() is important here as there are some
3708 	 * pr_soreceive methods that read the top of the socket buffer
3709 	 * without acquiring the socket buffer mutex, assuming that the
3710 	 * top of the buffer exclusively belongs to the read(2) syscall.
3711 	 * This is handy when performing MSG_PEEK.
3712 	 */
3713 	socantrcvmore(so);
3714 
3715 	error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
3716 	if (error != 0) {
3717 		KASSERT(SOLISTENING(so),
3718 		    ("%s: soiolock(%p) failed", __func__, so));
3719 		return;
3720 	}
3721 
3722 	sbrelease(so, SO_RCV);
3723 	SOCK_IO_RECV_UNLOCK(so);
3725 }
3726 
3727 int
3728 sosetfib(struct socket *so, int fibnum)
3729 {
3730 	if (fibnum < 0 || fibnum >= rt_numfibs)
3731 		return (EINVAL);
3732 
3733 	SOCK_LOCK(so);
3734 	so->so_fibnum = fibnum;
3735 	SOCK_UNLOCK(so);
3736 
3737 	return (0);
3738 }
3739 
3740 #ifdef SOCKET_HHOOK
3741 /*
3742  * Wrapper for invoking a socket helper hook.
3743  * Parameters: socket, context of the hook point, hook id.
3744  */
3745 static inline int
3746 hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
3747 {
3748 	struct socket_hhook_data hhook_data = {
3749 		.so = so,
3750 		.hctx = hctx,
3751 		.m = NULL,
3752 		.status = 0
3753 	};
3754 
3755 	CURVNET_SET(so->so_vnet);
3756 	HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
3757 	CURVNET_RESTORE();
3758 
3759 	/* Ugly but needed, since hhooks return void for now */
3760 	return (hhook_data.status);
3761 }
3762 #endif
3763 
3764 /*
3765  * Perhaps this routine, and sooptcopyout(), below, ought to come in an
3766  * additional variant to handle the case where the option value needs to be
3767  * some kind of integer, but not a specific size.  In addition to their use
3768  * here, these functions are also called by the protocol-level pr_ctloutput()
3769  * routines.
3770  */
3771 int
3772 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
3773 {
3774 	size_t	valsize;
3775 
3776 	/*
3777 	 * If the user gives us more than we wanted, we ignore it, but if we
3778 	 * don't get the minimum length the caller wants, we return EINVAL.
3779 	 * On success, sopt->sopt_valsize is set to however much we actually
3780 	 * retrieved.
3781 	 */
3782 	if ((valsize = sopt->sopt_valsize) < minlen)
3783 		return (EINVAL);
3784 	if (valsize > len)
3785 		sopt->sopt_valsize = valsize = len;
3786 
3787 	if (sopt->sopt_td != NULL)
3788 		return (copyin(sopt->sopt_val, buf, valsize));
3789 
3790 	bcopy(sopt->sopt_val, buf, valsize);
3791 	return (0);
3792 }
3793 
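/*
 * Illustrative sketch, not part of the original source: the typical
 * SOPT_SET pattern in a protocol's pr_ctloutput() routine, built on
 * sooptcopyin() above.  "example_ctloutput_set" and the destination
 * of optval are hypothetical.
 */
static int
example_ctloutput_set(struct socket *so, struct sockopt *sopt)
{
	int error, optval;

	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error != 0)
		return (error);
	if (optval < 0)
		return (EINVAL);
	/* Store optval into protocol state of 'so' under the proper lock. */
	return (0);
}
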
3794 /*
3795  * Kernel version of setsockopt(2).
3796  *
3797  * XXX: optlen is size_t, not socklen_t
3798  */
3799 int
3800 so_setsockopt(struct socket *so, int level, int optname, void *optval,
3801     size_t optlen)
3802 {
3803 	struct sockopt sopt;
3804 
3805 	sopt.sopt_level = level;
3806 	sopt.sopt_name = optname;
3807 	sopt.sopt_dir = SOPT_SET;
3808 	sopt.sopt_val = optval;
3809 	sopt.sopt_valsize = optlen;
3810 	sopt.sopt_td = NULL;
3811 	return (sosetopt(so, &sopt));
3812 }
3813 
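/*
 * Illustrative sketch, not part of the original source: a typical
 * in-kernel use of so_setsockopt() above, enabling SO_KEEPALIVE on a
 * kernel-owned socket much as setsockopt(2) would from userspace.
 */
static int
example_enable_keepalive(struct socket *so)
{
	int on = 1;

	return (so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)));
}
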
3814 int
3815 sosetopt(struct socket *so, struct sockopt *sopt)
3816 {
3817 	int	error, optval;
3818 	struct	linger l;
3819 	struct	timeval tv;
3820 	sbintime_t val, *valp;
3821 	uint32_t val32;
3822 #ifdef MAC
3823 	struct mac extmac;
3824 #endif
3825 
3826 	CURVNET_SET(so->so_vnet);
3827 	error = 0;
3828 	if (sopt->sopt_level != SOL_SOCKET) {
3829 		error = so->so_proto->pr_ctloutput(so, sopt);
3830 	} else {
3831 		switch (sopt->sopt_name) {
3832 		case SO_ACCEPTFILTER:
3833 			error = accept_filt_setopt(so, sopt);
3834 			if (error)
3835 				goto bad;
3836 			break;
3837 
3838 		case SO_LINGER:
3839 			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
3840 			if (error)
3841 				goto bad;
3842 			if (l.l_linger < 0 ||
3843 			    l.l_linger > USHRT_MAX ||
3844 			    l.l_linger > (INT_MAX / hz)) {
3845 				error = EDOM;
3846 				goto bad;
3847 			}
3848 			SOCK_LOCK(so);
3849 			so->so_linger = l.l_linger;
3850 			if (l.l_onoff)
3851 				so->so_options |= SO_LINGER;
3852 			else
3853 				so->so_options &= ~SO_LINGER;
3854 			SOCK_UNLOCK(so);
3855 			break;
3856 
3857 		case SO_DEBUG:
3858 		case SO_KEEPALIVE:
3859 		case SO_DONTROUTE:
3860 		case SO_USELOOPBACK:
3861 		case SO_BROADCAST:
3862 		case SO_REUSEADDR:
3863 		case SO_REUSEPORT:
3864 		case SO_REUSEPORT_LB:
3865 		case SO_OOBINLINE:
3866 		case SO_TIMESTAMP:
3867 		case SO_BINTIME:
3868 		case SO_NOSIGPIPE:
3869 		case SO_NO_DDP:
3870 		case SO_NO_OFFLOAD:
3871 		case SO_RERROR:
3872 			error = sooptcopyin(sopt, &optval, sizeof optval,
3873 			    sizeof optval);
3874 			if (error)
3875 				goto bad;
3876 			SOCK_LOCK(so);
3877 			if (optval)
3878 				so->so_options |= sopt->sopt_name;
3879 			else
3880 				so->so_options &= ~sopt->sopt_name;
3881 			SOCK_UNLOCK(so);
3882 			break;
3883 
3884 		case SO_SETFIB:
3885 			error = so->so_proto->pr_ctloutput(so, sopt);
3886 			break;
3887 
3888 		case SO_USER_COOKIE:
3889 			error = sooptcopyin(sopt, &val32, sizeof val32,
3890 			    sizeof val32);
3891 			if (error)
3892 				goto bad;
3893 			so->so_user_cookie = val32;
3894 			break;
3895 
3896 		case SO_SNDBUF:
3897 		case SO_RCVBUF:
3898 		case SO_SNDLOWAT:
3899 		case SO_RCVLOWAT:
3900 			error = so->so_proto->pr_setsbopt(so, sopt);
3901 			if (error)
3902 				goto bad;
3903 			break;
3904 
3905 		case SO_SNDTIMEO:
3906 		case SO_RCVTIMEO:
3907 #ifdef COMPAT_FREEBSD32
3908 			if (SV_CURPROC_FLAG(SV_ILP32)) {
3909 				struct timeval32 tv32;
3910 
3911 				error = sooptcopyin(sopt, &tv32, sizeof tv32,
3912 				    sizeof tv32);
3913 				CP(tv32, tv, tv_sec);
3914 				CP(tv32, tv, tv_usec);
3915 			} else
3916 #endif
3917 				error = sooptcopyin(sopt, &tv, sizeof tv,
3918 				    sizeof tv);
3919 			if (error)
3920 				goto bad;
3921 			if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
3922 			    tv.tv_usec >= 1000000) {
3923 				error = EDOM;
3924 				goto bad;
3925 			}
3926 			if (tv.tv_sec > INT32_MAX)
3927 				val = SBT_MAX;
3928 			else
3929 				val = tvtosbt(tv);
3930 			SOCK_LOCK(so);
3931 			valp = sopt->sopt_name == SO_SNDTIMEO ?
3932 			    (SOLISTENING(so) ? &so->sol_sbsnd_timeo :
3933 			    &so->so_snd.sb_timeo) :
3934 			    (SOLISTENING(so) ? &so->sol_sbrcv_timeo :
3935 			    &so->so_rcv.sb_timeo);
3936 			*valp = val;
3937 			SOCK_UNLOCK(so);
3938 			break;
3939 
3940 		case SO_LABEL:
3941 #ifdef MAC
3942 			error = sooptcopyin(sopt, &extmac, sizeof extmac,
3943 			    sizeof extmac);
3944 			if (error)
3945 				goto bad;
3946 			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
3947 			    so, &extmac);
3948 #else
3949 			error = EOPNOTSUPP;
3950 #endif
3951 			break;
3952 
3953 		case SO_TS_CLOCK:
3954 			error = sooptcopyin(sopt, &optval, sizeof optval,
3955 			    sizeof optval);
3956 			if (error)
3957 				goto bad;
3958 			if (optval < 0 || optval > SO_TS_CLOCK_MAX) {
3959 				error = EINVAL;
3960 				goto bad;
3961 			}
3962 			so->so_ts_clock = optval;
3963 			break;
3964 
3965 		case SO_MAX_PACING_RATE:
3966 			error = sooptcopyin(sopt, &val32, sizeof(val32),
3967 			    sizeof(val32));
3968 			if (error)
3969 				goto bad;
3970 			so->so_max_pacing_rate = val32;
3971 			break;
3972 
3973 		case SO_SPLICE: {
3974 			struct splice splice;
3975 
3976 #ifdef COMPAT_FREEBSD32
3977 			if (SV_CURPROC_FLAG(SV_ILP32)) {
3978 				struct splice32 splice32;
3979 
3980 				error = sooptcopyin(sopt, &splice32,
3981 				    sizeof(splice32), sizeof(splice32));
3982 				if (error == 0) {
3983 					splice.sp_fd = splice32.sp_fd;
3984 					splice.sp_max = splice32.sp_max;
3985 					CP(splice32.sp_idle, splice.sp_idle,
3986 					    tv_sec);
3987 					CP(splice32.sp_idle, splice.sp_idle,
3988 					    tv_usec);
3989 				}
3990 			} else
3991 #endif
3992 			{
3993 				error = sooptcopyin(sopt, &splice,
3994 				    sizeof(splice), sizeof(splice));
3995 			}
3996 			if (error)
3997 				goto bad;
3998 #ifdef KTRACE
3999 			if (KTRPOINT(curthread, KTR_STRUCT))
4000 				ktrsplice(&splice);
4001 #endif
4002 
4003 			error = splice_init();
4004 			if (error != 0)
4005 				goto bad;
4006 
4007 			if (splice.sp_fd >= 0) {
4008 				struct file *fp;
4009 				struct socket *so2;
4010 
4011 				if (!cap_rights_contains(sopt->sopt_rights,
4012 				    &cap_recv_rights)) {
4013 					error = ENOTCAPABLE;
4014 					goto bad;
4015 				}
4016 				error = getsock(sopt->sopt_td, splice.sp_fd,
4017 				    &cap_send_rights, &fp);
4018 				if (error != 0)
4019 					goto bad;
4020 				so2 = fp->f_data;
4021 
4022 				error = so_splice(so, so2, &splice);
4023 				fdrop(fp, sopt->sopt_td);
4024 			} else {
4025 				error = so_unsplice(so, false);
4026 			}
4027 			break;
4028 		}
4029 		default:
4030 #ifdef SOCKET_HHOOK
4031 			if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
4032 				error = hhook_run_socket(so, sopt,
4033 				    HHOOK_SOCKET_OPT);
4034 			else
4035 #endif
4036 				error = ENOPROTOOPT;
4037 			break;
4038 		}
4039 		if (error == 0)
4040 			(void)so->so_proto->pr_ctloutput(so, sopt);
4041 	}
4042 bad:
4043 	CURVNET_RESTORE();
4044 	return (error);
4045 }
4046 
4047 /*
4048  * Helper routine for getsockopt.
4049  */
4050 int
4051 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
4052 {
4053 	int	error;
4054 	size_t	valsize;
4055 
4056 	error = 0;
4057 
4058 	/*
4059 	 * Documented get behavior is that we always return a value, possibly
4060 	 * truncated to fit in the user's buffer.  Traditional behavior is
4061 	 * that we always tell the user precisely how much we copied, rather
4062 	 * than something useful like the total amount we had available for
4063 	 * her.  Note that this interface is not idempotent; the entire
4064 	 * answer must be generated ahead of time.
4065 	 */
4066 	valsize = min(len, sopt->sopt_valsize);
4067 	sopt->sopt_valsize = valsize;
4068 	if (sopt->sopt_val != NULL) {
4069 		if (sopt->sopt_td != NULL)
4070 			error = copyout(buf, sopt->sopt_val, valsize);
4071 		else
4072 			bcopy(buf, sopt->sopt_val, valsize);
4073 	}
4074 	return (error);
4075 }
4076 
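/*
 * Illustrative sketch, not part of the original source: the matching
 * SOPT_GET pattern for a protocol's pr_ctloutput() routine, built on
 * sooptcopyout() above.  "example_ctloutput_get" and the source of
 * optval are hypothetical.
 */
static int
example_ctloutput_get(struct socket *so, struct sockopt *sopt)
{
	int optval;

	/* Load optval from protocol state of 'so' under the proper lock. */
	optval = 0;
	return (sooptcopyout(sopt, &optval, sizeof(optval)));
}
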
4077 int
4078 sogetopt(struct socket *so, struct sockopt *sopt)
4079 {
4080 	int	error, optval;
4081 	struct	linger l;
4082 	struct	timeval tv;
4083 #ifdef MAC
4084 	struct mac extmac;
4085 #endif
4086 
4087 	CURVNET_SET(so->so_vnet);
4088 	error = 0;
4089 	if (sopt->sopt_level != SOL_SOCKET) {
4090 		error = so->so_proto->pr_ctloutput(so, sopt);
4091 		CURVNET_RESTORE();
4092 		return (error);
4093 	} else {
4094 		switch (sopt->sopt_name) {
4095 		case SO_ACCEPTFILTER:
4096 			error = accept_filt_getopt(so, sopt);
4097 			break;
4098 
4099 		case SO_LINGER:
4100 			SOCK_LOCK(so);
4101 			l.l_onoff = so->so_options & SO_LINGER;
4102 			l.l_linger = so->so_linger;
4103 			SOCK_UNLOCK(so);
4104 			error = sooptcopyout(sopt, &l, sizeof l);
4105 			break;
4106 
4107 		case SO_USELOOPBACK:
4108 		case SO_DONTROUTE:
4109 		case SO_DEBUG:
4110 		case SO_KEEPALIVE:
4111 		case SO_REUSEADDR:
4112 		case SO_REUSEPORT:
4113 		case SO_REUSEPORT_LB:
4114 		case SO_BROADCAST:
4115 		case SO_OOBINLINE:
4116 		case SO_ACCEPTCONN:
4117 		case SO_TIMESTAMP:
4118 		case SO_BINTIME:
4119 		case SO_NOSIGPIPE:
4120 		case SO_NO_DDP:
4121 		case SO_NO_OFFLOAD:
4122 		case SO_RERROR:
4123 			optval = so->so_options & sopt->sopt_name;
4124 integer:
4125 			error = sooptcopyout(sopt, &optval, sizeof optval);
4126 			break;
4127 
4128 		case SO_FIB:
4129 			SOCK_LOCK(so);
4130 			optval = so->so_fibnum;
4131 			SOCK_UNLOCK(so);
4132 			goto integer;
4133 
4134 		case SO_DOMAIN:
4135 			optval = so->so_proto->pr_domain->dom_family;
4136 			goto integer;
4137 
4138 		case SO_TYPE:
4139 			optval = so->so_type;
4140 			goto integer;
4141 
4142 		case SO_PROTOCOL:
4143 			optval = so->so_proto->pr_protocol;
4144 			goto integer;
4145 
4146 		case SO_ERROR:
4147 			SOCK_LOCK(so);
4148 			if (so->so_error) {
4149 				optval = so->so_error;
4150 				so->so_error = 0;
4151 			} else {
4152 				optval = so->so_rerror;
4153 				so->so_rerror = 0;
4154 			}
4155 			SOCK_UNLOCK(so);
4156 			goto integer;
4157 
4158 		case SO_SNDBUF:
4159 			SOCK_LOCK(so);
4160 			optval = SOLISTENING(so) ? so->sol_sbsnd_hiwat :
4161 			    so->so_snd.sb_hiwat;
4162 			SOCK_UNLOCK(so);
4163 			goto integer;
4164 
4165 		case SO_RCVBUF:
4166 			SOCK_LOCK(so);
4167 			optval = SOLISTENING(so) ? so->sol_sbrcv_hiwat :
4168 			    so->so_rcv.sb_hiwat;
4169 			SOCK_UNLOCK(so);
4170 			goto integer;
4171 
4172 		case SO_SNDLOWAT:
4173 			SOCK_LOCK(so);
4174 			optval = SOLISTENING(so) ? so->sol_sbsnd_lowat :
4175 			    so->so_snd.sb_lowat;
4176 			SOCK_UNLOCK(so);
4177 			goto integer;
4178 
4179 		case SO_RCVLOWAT:
4180 			SOCK_LOCK(so);
4181 			optval = SOLISTENING(so) ? so->sol_sbrcv_lowat :
4182 			    so->so_rcv.sb_lowat;
4183 			SOCK_UNLOCK(so);
4184 			goto integer;
4185 
4186 		case SO_SNDTIMEO:
4187 		case SO_RCVTIMEO:
4188 			SOCK_LOCK(so);
4189 			tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
4190 			    (SOLISTENING(so) ? so->sol_sbsnd_timeo :
4191 			    so->so_snd.sb_timeo) :
4192 			    (SOLISTENING(so) ? so->sol_sbrcv_timeo :
4193 			    so->so_rcv.sb_timeo));
4194 			SOCK_UNLOCK(so);
4195 #ifdef COMPAT_FREEBSD32
4196 			if (SV_CURPROC_FLAG(SV_ILP32)) {
4197 				struct timeval32 tv32;
4198 
4199 				CP(tv, tv32, tv_sec);
4200 				CP(tv, tv32, tv_usec);
4201 				error = sooptcopyout(sopt, &tv32, sizeof tv32);
4202 			} else
4203 #endif
4204 				error = sooptcopyout(sopt, &tv, sizeof tv);
4205 			break;
4206 
4207 		case SO_LABEL:
4208 #ifdef MAC
4209 			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
4210 			    sizeof(extmac));
4211 			if (error)
4212 				goto bad;
4213 			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
4214 			    so, &extmac);
4215 			if (error)
4216 				goto bad;
4217 			/* Don't copy out extmac, it is unchanged. */
4218 #else
4219 			error = EOPNOTSUPP;
4220 #endif
4221 			break;
4222 
4223 		case SO_PEERLABEL:
4224 #ifdef MAC
4225 			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
4226 			    sizeof(extmac));
4227 			if (error)
4228 				goto bad;
4229 			error = mac_getsockopt_peerlabel(
4230 			    sopt->sopt_td->td_ucred, so, &extmac);
4231 			if (error)
4232 				goto bad;
4233 			/* Don't copy out extmac, it is unchanged. */
4234 #else
4235 			error = EOPNOTSUPP;
4236 #endif
4237 			break;
4238 
4239 		case SO_LISTENQLIMIT:
4240 			SOCK_LOCK(so);
4241 			optval = SOLISTENING(so) ? so->sol_qlimit : 0;
4242 			SOCK_UNLOCK(so);
4243 			goto integer;
4244 
4245 		case SO_LISTENQLEN:
4246 			SOCK_LOCK(so);
4247 			optval = SOLISTENING(so) ? so->sol_qlen : 0;
4248 			SOCK_UNLOCK(so);
4249 			goto integer;
4250 
4251 		case SO_LISTENINCQLEN:
4252 			SOCK_LOCK(so);
4253 			optval = SOLISTENING(so) ? so->sol_incqlen : 0;
4254 			SOCK_UNLOCK(so);
4255 			goto integer;
4256 
4257 		case SO_TS_CLOCK:
4258 			optval = so->so_ts_clock;
4259 			goto integer;
4260 
4261 		case SO_MAX_PACING_RATE:
4262 			optval = so->so_max_pacing_rate;
4263 			goto integer;
4264 
4265 		case SO_SPLICE: {
4266 			off_t n;
4267 
4268 			/*
4269 			 * Acquire the I/O lock to serialize with
4270 			 * so_splice_xfer().  This is not required for
4271 			 * correctness, but makes testing simpler: once a byte
4272 			 * has been transmitted to the sink and observed (e.g.,
4273 			 * by reading from the socket to which the sink is
4274 			 * connected), a subsequent getsockopt(SO_SPLICE) will
4275 			 * return an up-to-date value.
4276 			 */
4277 			error = SOCK_IO_RECV_LOCK(so, SBL_WAIT);
4278 			if (error != 0)
4279 				goto bad;
4280 			SOCK_LOCK(so);
4281 			if (SOLISTENING(so)) {
4282 				n = 0;
4283 			} else {
4284 				n = so->so_splice_sent;
4285 			}
4286 			SOCK_UNLOCK(so);
4287 			SOCK_IO_RECV_UNLOCK(so);
4288 			error = sooptcopyout(sopt, &n, sizeof(n));
4289 			break;
4290 		}
4291 
4292 		default:
4293 #ifdef SOCKET_HHOOK
4294 			if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
4295 				error = hhook_run_socket(so, sopt,
4296 				    HHOOK_SOCKET_OPT);
4297 			else
4298 #endif
4299 				error = ENOPROTOOPT;
4300 			break;
4301 		}
4302 	}
4303 bad:
4304 	CURVNET_RESTORE();
4305 	return (error);
4306 }
4307 
4308 int
4309 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
4310 {
4311 	struct mbuf *m, *m_prev;
4312 	int sopt_size = sopt->sopt_valsize;
4313 
4314 	MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
4315 	if (m == NULL)
4316 		return (ENOBUFS);
4317 	if (sopt_size > MLEN) {
4318 		MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
4319 		if ((m->m_flags & M_EXT) == 0) {
4320 			m_free(m);
4321 			return (ENOBUFS);
4322 		}
4323 		m->m_len = min(MCLBYTES, sopt_size);
4324 	} else {
4325 		m->m_len = min(MLEN, sopt_size);
4326 	}
4327 	sopt_size -= m->m_len;
4328 	*mp = m;
4329 	m_prev = m;
4330 
4331 	while (sopt_size) {
4332 		MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
4333 		if (m == NULL) {
4334 			m_freem(*mp);
4335 			return (ENOBUFS);
4336 		}
4337 		if (sopt_size > MLEN) {
4338 			MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
4339 			    M_NOWAIT);
4340 			if ((m->m_flags & M_EXT) == 0) {
4341 				m_freem(m);
4342 				m_freem(*mp);
4343 				return (ENOBUFS);
4344 			}
4345 			m->m_len = min(MCLBYTES, sopt_size);
4346 		} else {
4347 			m->m_len = min(MLEN, sopt_size);
4348 		}
4349 		sopt_size -= m->m_len;
4350 		m_prev->m_next = m;
4351 		m_prev = m;
4352 	}
4353 	return (0);
4354 }
4355 
4356 int
4357 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
4358 {
4359 	struct mbuf *m0 = m;
4360 
4361 	if (sopt->sopt_val == NULL)
4362 		return (0);
4363 	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
4364 		if (sopt->sopt_td != NULL) {
4365 			int error;
4366 
4367 			error = copyin(sopt->sopt_val, mtod(m, char *),
4368 			    m->m_len);
4369 			if (error != 0) {
4370 				m_freem(m0);
4371 				return (error);
4372 			}
4373 		} else
4374 			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
4375 		sopt->sopt_valsize -= m->m_len;
4376 		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
4377 		m = m->m_next;
4378 	}
4379 	if (m != NULL) /* enough space should have been allocated at ip6_sooptmcopyin() */
4380 		panic("ip6_sooptmcopyin");
4381 	return (0);
4382 }
4383 
4384 int
4385 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
4386 {
4387 	struct mbuf *m0 = m;
4388 	size_t valsize = 0;
4389 
4390 	if (sopt->sopt_val == NULL)
4391 		return (0);
4392 	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
4393 		if (sopt->sopt_td != NULL) {
4394 			int error;
4395 
4396 			error = copyout(mtod(m, char *), sopt->sopt_val,
4397 			    m->m_len);
4398 			if (error != 0) {
4399 				m_freem(m0);
4400 				return (error);
4401 			}
4402 		} else
4403 			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
4404 		sopt->sopt_valsize -= m->m_len;
4405 		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
4406 		valsize += m->m_len;
4407 		m = m->m_next;
4408 	}
4409 	if (m != NULL) {
4410 		/* the caller should have supplied a large enough buffer */
4411 		m_freem(m0);
4412 		return (EINVAL);
4413 	}
4414 	sopt->sopt_valsize = valsize;
4415 	return (0);
4416 }
4417 
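/*
 * Illustrative sketch, not part of the original source: pulling
 * variable-length option data into an mbuf chain with soopt_getm()
 * and soopt_mcopyin() above, in the style of IPv6 option handling.
 * "example_sopt_to_mbuf" is a hypothetical name.
 */
static int
example_sopt_to_mbuf(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	/* Allocate a chain sized to sopt->sopt_valsize. */
	error = soopt_getm(sopt, mp);
	if (error != 0)
		return (error);
	/* Copy the option bytes in; soopt_mcopyin() frees the chain on error. */
	error = soopt_mcopyin(sopt, *mp);
	return (error);
}
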
4418 /*
4419  * sohasoutofband(): protocol notifies socket layer of the arrival of new
4420  * out-of-band data, which will then notify socket consumers.
4421  */
4422 void
4423 sohasoutofband(struct socket *so)
4424 {
4425 
4426 	if (so->so_sigio != NULL)
4427 		pgsigio(&so->so_sigio, SIGURG, 0);
4428 	selwakeuppri(&so->so_rdsel, PSOCK);
4429 }
4430 
4431 int
4432 sopoll_generic(struct socket *so, int events, struct thread *td)
4433 {
4434 	int revents;
4435 
4436 	SOCK_LOCK(so);
4437 	if (SOLISTENING(so)) {
4438 		if (!(events & (POLLIN | POLLRDNORM)))
4439 			revents = 0;
4440 		else if (!TAILQ_EMPTY(&so->sol_comp))
4441 			revents = events & (POLLIN | POLLRDNORM);
4442 		else if ((events & POLLINIGNEOF) == 0 && so->so_error)
4443 			revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP;
4444 		else {
4445 			selrecord(td, &so->so_rdsel);
4446 			revents = 0;
4447 		}
4448 	} else {
4449 		revents = 0;
4450 		SOCK_SENDBUF_LOCK(so);
4451 		SOCK_RECVBUF_LOCK(so);
4452 		if (events & (POLLIN | POLLRDNORM))
4453 			if (soreadabledata(so) && !isspliced(so))
4454 				revents |= events & (POLLIN | POLLRDNORM);
4455 		if (events & (POLLOUT | POLLWRNORM))
4456 			if (sowriteable(so) && !issplicedback(so))
4457 				revents |= events & (POLLOUT | POLLWRNORM);
4458 		if (events & (POLLPRI | POLLRDBAND))
4459 			if (so->so_oobmark ||
4460 			    (so->so_rcv.sb_state & SBS_RCVATMARK))
4461 				revents |= events & (POLLPRI | POLLRDBAND);
4462 		if ((events & POLLINIGNEOF) == 0) {
4463 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
4464 				revents |= events & (POLLIN | POLLRDNORM);
4465 				if (so->so_snd.sb_state & SBS_CANTSENDMORE)
4466 					revents |= POLLHUP;
4467 			}
4468 		}
4469 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
4470 			revents |= events & POLLRDHUP;
4471 		if (revents == 0) {
4472 			if (events &
4473 			    (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND | POLLRDHUP)) {
4474 				selrecord(td, &so->so_rdsel);
4475 				so->so_rcv.sb_flags |= SB_SEL;
4476 			}
4477 			if (events & (POLLOUT | POLLWRNORM)) {
4478 				selrecord(td, &so->so_wrsel);
4479 				so->so_snd.sb_flags |= SB_SEL;
4480 			}
4481 		}
4482 		SOCK_RECVBUF_UNLOCK(so);
4483 		SOCK_SENDBUF_UNLOCK(so);
4484 	}
4485 	SOCK_UNLOCK(so);
4486 	return (revents);
4487 }
4488 
4489 int
4490 sokqfilter_generic(struct socket *so, struct knote *kn)
4491 {
4492 	struct sockbuf *sb;
4493 	sb_which which;
4494 	struct knlist *knl;
4495 
4496 	switch (kn->kn_filter) {
4497 	case EVFILT_READ:
4498 		kn->kn_fop = &soread_filtops;
4499 		knl = &so->so_rdsel.si_note;
4500 		sb = &so->so_rcv;
4501 		which = SO_RCV;
4502 		break;
4503 	case EVFILT_WRITE:
4504 		kn->kn_fop = &sowrite_filtops;
4505 		knl = &so->so_wrsel.si_note;
4506 		sb = &so->so_snd;
4507 		which = SO_SND;
4508 		break;
4509 	case EVFILT_EMPTY:
4510 		kn->kn_fop = &soempty_filtops;
4511 		knl = &so->so_wrsel.si_note;
4512 		sb = &so->so_snd;
4513 		which = SO_SND;
4514 		break;
4515 	default:
4516 		return (EINVAL);
4517 	}
4518 
4519 	SOCK_LOCK(so);
4520 	if (SOLISTENING(so)) {
4521 		knlist_add(knl, kn, 1);
4522 	} else {
4523 		SOCK_BUF_LOCK(so, which);
4524 		knlist_add(knl, kn, 1);
4525 		sb->sb_flags |= SB_KNOTE;
4526 		SOCK_BUF_UNLOCK(so, which);
4527 	}
4528 	SOCK_UNLOCK(so);
4529 	return (0);
4530 }
4531 
4532 static void
4533 filt_sordetach(struct knote *kn)
4534 {
4535 	struct socket *so = kn->kn_fp->f_data;
4536 
4537 	so_rdknl_lock(so);
4538 	knlist_remove(&so->so_rdsel.si_note, kn, 1);
4539 	if (!SOLISTENING(so) && knlist_empty(&so->so_rdsel.si_note))
4540 		so->so_rcv.sb_flags &= ~SB_KNOTE;
4541 	so_rdknl_unlock(so);
4542 }
4543 
4544 /*ARGSUSED*/
4545 static int
4546 filt_soread(struct knote *kn, long hint)
4547 {
4548 	struct socket *so;
4549 
4550 	so = kn->kn_fp->f_data;
4551 
4552 	if (SOLISTENING(so)) {
4553 		SOCK_LOCK_ASSERT(so);
4554 		kn->kn_data = so->sol_qlen;
4555 		if (so->so_error) {
4556 			kn->kn_flags |= EV_EOF;
4557 			kn->kn_fflags = so->so_error;
4558 			return (1);
4559 		}
4560 		return (!TAILQ_EMPTY(&so->sol_comp));
4561 	}
4562 
4563 	if ((so->so_rcv.sb_flags & SB_SPLICED) != 0)
4564 		return (0);
4565 
4566 	SOCK_RECVBUF_LOCK_ASSERT(so);
4567 
4568 	kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
4569 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
4570 		kn->kn_flags |= EV_EOF;
4571 		kn->kn_fflags = so->so_error;
4572 		return (1);
4573 	} else if (so->so_error || so->so_rerror)
4574 		return (1);
4575 
4576 	if (kn->kn_sfflags & NOTE_LOWAT) {
4577 		if (kn->kn_data >= kn->kn_sdata)
4578 			return (1);
4579 	} else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
4580 		return (1);
4581 
4582 #ifdef SOCKET_HHOOK
4583 	/* This hook returning non-zero indicates an event, not error */
4584 	return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD));
4585 #else
4586 	return (0);
4587 #endif
4588 }
4589 
4590 static void
4591 filt_sowdetach(struct knote *kn)
4592 {
4593 	struct socket *so = kn->kn_fp->f_data;
4594 
4595 	so_wrknl_lock(so);
4596 	knlist_remove(&so->so_wrsel.si_note, kn, 1);
4597 	if (!SOLISTENING(so) && knlist_empty(&so->so_wrsel.si_note))
4598 		so->so_snd.sb_flags &= ~SB_KNOTE;
4599 	so_wrknl_unlock(so);
4600 }
4601 
4602 /*ARGSUSED*/
4603 static int
4604 filt_sowrite(struct knote *kn, long hint)
4605 {
4606 	struct socket *so;
4607 
4608 	so = kn->kn_fp->f_data;
4609 
4610 	if (SOLISTENING(so))
4611 		return (0);
4612 
4613 	SOCK_SENDBUF_LOCK_ASSERT(so);
4614 	kn->kn_data = sbspace(&so->so_snd);
4615 
4616 #ifdef SOCKET_HHOOK
4617 	hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE);
4618 #endif
4619 
4620 	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
4621 		kn->kn_flags |= EV_EOF;
4622 		kn->kn_fflags = so->so_error;
4623 		return (1);
4624 	} else if (so->so_error)	/* temporary udp error */
4625 		return (1);
4626 	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
4627 	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
4628 		return (0);
4629 	else if (kn->kn_sfflags & NOTE_LOWAT)
4630 		return (kn->kn_data >= kn->kn_sdata);
4631 	else
4632 		return (kn->kn_data >= so->so_snd.sb_lowat);
4633 }
4634 
4635 static int
4636 filt_soempty(struct knote *kn, long hint)
4637 {
4638 	struct socket *so;
4639 
4640 	so = kn->kn_fp->f_data;
4641 
4642 	if (SOLISTENING(so))
4643 		return (1);
4644 
4645 	SOCK_SENDBUF_LOCK_ASSERT(so);
4646 	kn->kn_data = sbused(&so->so_snd);
4647 
4648 	if (kn->kn_data == 0)
4649 		return (1);
4650 	else
4651 		return (0);
4652 }
4653 
4654 int
4655 socheckuid(struct socket *so, uid_t uid)
4656 {
4657 
4658 	if (so == NULL)
4659 		return (EPERM);
4660 	if (so->so_cred->cr_uid != uid)
4661 		return (EPERM);
4662 	return (0);
4663 }
4664 
4665 /*
4666  * These functions are used by protocols to notify the socket layer (and its
4667  * consumers) of state changes in the sockets driven by protocol-side events.
4668  */
4669 
4670 /*
4671  * Procedures to manipulate state flags of socket and do appropriate wakeups.
4672  *
4673  * Normal sequence from the active (originating) side is that
4674  * soisconnecting() is called during processing of connect() call, resulting
4675  * in an eventual call to soisconnected() if/when the connection is
4676  * established.  When the connection is torn down soisdisconnecting() is
4677  * called during processing of disconnect() call, and soisdisconnected() is
4678  * called when the connection to the peer is totally severed.  The semantics
4679  * of these routines are such that connectionless protocols can call
4680  * soisconnected() and soisdisconnected() only, bypassing the in-progress
4681  * calls when setting up a ``connection'' takes no time.
4682  *
4683  * From the passive side, a socket is created with two queues of sockets:
4684  * so_incomp for connections in progress and so_comp for connections already
4685  * made and awaiting user acceptance.  As a protocol is preparing incoming
4686  * connections, it creates a socket structure queued on so_incomp by calling
4687  * sonewconn().  When the connection is established, soisconnected() is
4688  * called, and transfers the socket structure to so_comp, making it available
4689  * to accept().
4690  *
4691  * If a socket is closed with sockets on either so_incomp or so_comp, these
4692  * sockets are dropped.
4693  *
4694  * If higher-level protocols are implemented in the kernel, the wakeups done
4695  * here will sometimes cause software-interrupt process scheduling.
4696  */
4697 void
4698 soisconnecting(struct socket *so)
4699 {
4700 
4701 	SOCK_LOCK(so);
4702 	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
4703 	so->so_state |= SS_ISCONNECTING;
4704 	SOCK_UNLOCK(so);
4705 }
4706 
4707 void
4708 soisconnected(struct socket *so)
4709 {
4710 	bool last __diagused;
4711 
4712 	SOCK_LOCK(so);
4713 	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING);
4714 	so->so_state |= SS_ISCONNECTED;
4715 
4716 	if (so->so_qstate == SQ_INCOMP) {
4717 		struct socket *head = so->so_listen;
4718 		int ret;
4719 
4720 		KASSERT(head, ("%s: so %p on incomp of NULL", __func__, so));
4721 		/*
4722 		 * When promoting a socket from the incomplete queue to the
4723 		 * complete one, we need to take the locks in reverse order.
4724 		 * We first try a trylock, and if that doesn't succeed, we go
4725 		 * the hard way: leave a reference, relock in the proper
4726 		 * order, and recheck consistency.
4727 		 */
4728 		if (__predict_false(SOLISTEN_TRYLOCK(head) == 0)) {
4729 			soref(head);
4730 			SOCK_UNLOCK(so);
4731 			SOLISTEN_LOCK(head);
4732 			SOCK_LOCK(so);
4733 			if (__predict_false(head != so->so_listen)) {
4734 				/*
4735 				 * The socket went off the listen queue:
4736 				 * we lost a race with close(2) on the
4737 				 * listening socket.  It is about to soabort().
4738 				 */
4739 				SOCK_UNLOCK(so);
4740 				sorele_locked(head);
4741 				return;
4742 			}
4743 			last = refcount_release(&head->so_count);
4744 			KASSERT(!last, ("%s: released last reference for %p",
4745 			    __func__, head));
4746 		}
4747 again:
4748 		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
4749 			TAILQ_REMOVE(&head->sol_incomp, so, so_list);
4750 			head->sol_incqlen--;
4751 			TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
4752 			head->sol_qlen++;
4753 			so->so_qstate = SQ_COMP;
4754 			SOCK_UNLOCK(so);
4755 			solisten_wakeup(head);	/* unlocks */
4756 		} else {
4757 			SOCK_RECVBUF_LOCK(so);
4758 			soupcall_set(so, SO_RCV,
4759 			    head->sol_accept_filter->accf_callback,
4760 			    head->sol_accept_filter_arg);
4761 			so->so_options &= ~SO_ACCEPTFILTER;
4762 			ret = head->sol_accept_filter->accf_callback(so,
4763 			    head->sol_accept_filter_arg, M_NOWAIT);
4764 			if (ret == SU_ISCONNECTED) {
4765 				soupcall_clear(so, SO_RCV);
4766 				SOCK_RECVBUF_UNLOCK(so);
4767 				goto again;
4768 			}
4769 			SOCK_RECVBUF_UNLOCK(so);
4770 			SOCK_UNLOCK(so);
4771 			SOLISTEN_UNLOCK(head);
4772 		}
4773 		return;
4774 	}
4775 	SOCK_UNLOCK(so);
4776 	wakeup(&so->so_timeo);
4777 	sorwakeup(so);
4778 	sowwakeup(so);
4779 }
4780 
4781 void
4782 soisdisconnecting(struct socket *so)
4783 {
4784 
4785 	SOCK_LOCK(so);
4786 	so->so_state &= ~SS_ISCONNECTING;
4787 	so->so_state |= SS_ISDISCONNECTING;
4788 
4789 	if (!SOLISTENING(so)) {
4790 		SOCK_RECVBUF_LOCK(so);
4791 		socantrcvmore_locked(so);
4792 		SOCK_SENDBUF_LOCK(so);
4793 		socantsendmore_locked(so);
4794 	}
4795 	SOCK_UNLOCK(so);
4796 	wakeup(&so->so_timeo);
4797 }
4798 
4799 void
4800 soisdisconnected(struct socket *so)
4801 {
4802 
4803 	SOCK_LOCK(so);
4804 
4805 	/*
4806 	 * There is at least one reader of so_state that does not
4807 	 * acquire the socket lock, namely soreceive_generic().  Ensure
4808 	 * that it never sees all of the flags tracking connection status
4809 	 * cleared at once, by ordering the updates with the release
4810 	 * semantics of the thread fence below.
4811 	 */
4812 	so->so_state |= SS_ISDISCONNECTED;
4813 	atomic_thread_fence_rel();
4814 	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
4815 
4816 	if (!SOLISTENING(so)) {
4817 		SOCK_UNLOCK(so);
4818 		SOCK_RECVBUF_LOCK(so);
4819 		socantrcvmore_locked(so);
4820 		SOCK_SENDBUF_LOCK(so);
4821 		sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
4822 		socantsendmore_locked(so);
4823 	} else
4824 		SOCK_UNLOCK(so);
4825 	wakeup(&so->so_timeo);
4826 }
4827 
4828 int
4829 soiolock(struct socket *so, struct sx *sx, int flags)
4830 {
4831 	int error;
4832 
4833 	KASSERT((flags & SBL_VALID) == flags,
4834 	    ("soiolock: invalid flags %#x", flags));
4835 
4836 	if ((flags & SBL_WAIT) != 0) {
4837 		if ((flags & SBL_NOINTR) != 0) {
4838 			sx_xlock(sx);
4839 		} else {
4840 			error = sx_xlock_sig(sx);
4841 			if (error != 0)
4842 				return (error);
4843 		}
4844 	} else if (!sx_try_xlock(sx)) {
4845 		return (EWOULDBLOCK);
4846 	}
4847 
4848 	if (__predict_false(SOLISTENING(so))) {
4849 		sx_xunlock(sx);
4850 		return (ENOTCONN);
4851 	}
4852 	return (0);
4853 }
4854 
4855 void
4856 soiounlock(struct sx *sx)
4857 {
4858 	sx_xunlock(sx);
4859 }
4860 
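/*
 * Illustrative sketch, not part of the original source: the canonical
 * pattern for serializing socket-buffer I/O with soiolock() and
 * soiounlock(), normally used via the SOCK_IO_RECV_LOCK() and
 * SOCK_IO_RECV_UNLOCK() wrappers seen earlier in this file.
 */
static int
example_with_recv_lock(struct socket *so)
{
	int error;

	error = SOCK_IO_RECV_LOCK(so, SBL_WAIT);
	if (error != 0)
		return (error);
	/* Access state that must not interleave with other readers. */
	SOCK_IO_RECV_UNLOCK(so);
	return (0);
}
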
4861 /*
4862  * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
4863  */
4864 struct sockaddr *
4865 sodupsockaddr(const struct sockaddr *sa, int mflags)
4866 {
4867 	struct sockaddr *sa2;
4868 
4869 	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
4870 	if (sa2)
4871 		bcopy(sa, sa2, sa->sa_len);
4872 	return (sa2);
4873 }
4874 
4875 /*
4876  * Register per-socket destructor.
4877  */
4878 void
4879 sodtor_set(struct socket *so, so_dtor_t *func)
4880 {
4881 
4882 	SOCK_LOCK_ASSERT(so);
4883 	so->so_dtor = func;
4884 }
4885 
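/*
 * Illustrative sketch, not part of the original source: installing a
 * per-socket destructor with sodtor_set(), which requires the socket
 * lock.  "example_dtor" is a hypothetical callback that would release
 * consumer state during socket teardown.
 */
static void
example_dtor(struct socket *so)
{
	/* Release consumer state associated with 'so'. */
}

static void
example_install_dtor(struct socket *so)
{
	SOCK_LOCK(so);
	sodtor_set(so, example_dtor);
	SOCK_UNLOCK(so);
}
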
4886 /*
4887  * Register per-socket buffer upcalls.
4888  */
4889 void
4890 soupcall_set(struct socket *so, sb_which which, so_upcall_t func, void *arg)
4891 {
4892 	struct sockbuf *sb;
4893 
4894 	KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
4895 
4896 	switch (which) {
4897 	case SO_RCV:
4898 		sb = &so->so_rcv;
4899 		break;
4900 	case SO_SND:
4901 		sb = &so->so_snd;
4902 		break;
4903 	}
4904 	SOCK_BUF_LOCK_ASSERT(so, which);
4905 	sb->sb_upcall = func;
4906 	sb->sb_upcallarg = arg;
4907 	sb->sb_flags |= SB_UPCALL;
4908 }
4909 
4910 void
4911 soupcall_clear(struct socket *so, sb_which which)
4912 {
4913 	struct sockbuf *sb;
4914 
4915 	KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
4916 
4917 	switch (which) {
4918 	case SO_RCV:
4919 		sb = &so->so_rcv;
4920 		break;
4921 	case SO_SND:
4922 		sb = &so->so_snd;
4923 		break;
4924 	}
4925 	SOCK_BUF_LOCK_ASSERT(so, which);
4926 	KASSERT(sb->sb_upcall != NULL,
4927 	    ("%s: so %p no upcall to clear", __func__, so));
4928 	sb->sb_upcall = NULL;
4929 	sb->sb_upcallarg = NULL;
4930 	sb->sb_flags &= ~SB_UPCALL;
4931 }
4932 
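/*
 * Illustrative sketch, not part of the original source: registering a
 * receive-buffer upcall with soupcall_set(), which must be called with
 * the buffer lock held, as the accept filter path in soisconnected()
 * does.  "example_rcv_upcall" is a hypothetical callback; returning
 * SU_OK leaves the upcall installed.
 */
static int
example_rcv_upcall(struct socket *so, void *arg, int waitok)
{
	/* Invoked from socket wakeup paths when so->so_rcv changes. */
	return (SU_OK);
}

static void
example_install_upcall(struct socket *so, void *arg)
{
	SOCK_RECVBUF_LOCK(so);
	soupcall_set(so, SO_RCV, example_rcv_upcall, arg);
	SOCK_RECVBUF_UNLOCK(so);
}
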
4933 void
4934 solisten_upcall_set(struct socket *so, so_upcall_t func, void *arg)
4935 {
4936 
4937 	SOLISTEN_LOCK_ASSERT(so);
4938 	so->sol_upcall = func;
4939 	so->sol_upcallarg = arg;
4940 }
4941 
4942 static void
4943 so_rdknl_lock(void *arg)
4944 {
4945 	struct socket *so = arg;
4946 
4947 retry:
4948 	if (SOLISTENING(so)) {
4949 		SOLISTEN_LOCK(so);
4950 	} else {
4951 		SOCK_RECVBUF_LOCK(so);
4952 		if (__predict_false(SOLISTENING(so))) {
4953 			SOCK_RECVBUF_UNLOCK(so);
4954 			goto retry;
4955 		}
4956 	}
4957 }
4958 
4959 static void
4960 so_rdknl_unlock(void *arg)
4961 {
4962 	struct socket *so = arg;
4963 
4964 	if (SOLISTENING(so))
4965 		SOLISTEN_UNLOCK(so);
4966 	else
4967 		SOCK_RECVBUF_UNLOCK(so);
4968 }
4969 
4970 static void
4971 so_rdknl_assert_lock(void *arg, int what)
4972 {
4973 	struct socket *so = arg;
4974 
4975 	if (what == LA_LOCKED) {
4976 		if (SOLISTENING(so))
4977 			SOLISTEN_LOCK_ASSERT(so);
4978 		else
4979 			SOCK_RECVBUF_LOCK_ASSERT(so);
4980 	} else {
4981 		if (SOLISTENING(so))
4982 			SOLISTEN_UNLOCK_ASSERT(so);
4983 		else
4984 			SOCK_RECVBUF_UNLOCK_ASSERT(so);
4985 	}
4986 }
4987 
4988 static void
4989 so_wrknl_lock(void *arg)
4990 {
4991 	struct socket *so = arg;
4992 
4993 retry:
4994 	if (SOLISTENING(so)) {
4995 		SOLISTEN_LOCK(so);
4996 	} else {
4997 		SOCK_SENDBUF_LOCK(so);
4998 		if (__predict_false(SOLISTENING(so))) {
4999 			SOCK_SENDBUF_UNLOCK(so);
5000 			goto retry;
5001 		}
5002 	}
5003 }
5004 
5005 static void
5006 so_wrknl_unlock(void *arg)
5007 {
5008 	struct socket *so = arg;
5009 
5010 	if (SOLISTENING(so))
5011 		SOLISTEN_UNLOCK(so);
5012 	else
5013 		SOCK_SENDBUF_UNLOCK(so);
5014 }
5015 
5016 static void
5017 so_wrknl_assert_lock(void *arg, int what)
5018 {
5019 	struct socket *so = arg;
5020 
5021 	if (what == LA_LOCKED) {
5022 		if (SOLISTENING(so))
5023 			SOLISTEN_LOCK_ASSERT(so);
5024 		else
5025 			SOCK_SENDBUF_LOCK_ASSERT(so);
5026 	} else {
5027 		if (SOLISTENING(so))
5028 			SOLISTEN_UNLOCK_ASSERT(so);
5029 		else
5030 			SOCK_SENDBUF_UNLOCK_ASSERT(so);
5031 	}
5032 }
5033 
5034 /*
5035  * Create an external-format (``xsocket'') structure using the information in
5036  * the kernel-format socket structure pointed to by so.  This is done to
5037  * reduce the spew of irrelevant information over this interface, to isolate
5038  * user code from changes in the kernel structure, and potentially to provide
5039  * information-hiding if we decide that some of this information should be
5040  * hidden from users.
5041  */
5042 void
5043 sotoxsocket(struct socket *so, struct xsocket *xso)
5044 {
5045 
5046 	bzero(xso, sizeof(*xso));
5047 	xso->xso_len = sizeof *xso;
5048 	xso->xso_so = (uintptr_t)so;
5049 	xso->so_type = so->so_type;
5050 	xso->so_options = so->so_options;
5051 	xso->so_linger = so->so_linger;
5052 	xso->so_state = so->so_state;
5053 	xso->so_pcb = (uintptr_t)so->so_pcb;
5054 	xso->xso_protocol = so->so_proto->pr_protocol;
5055 	xso->xso_family = so->so_proto->pr_domain->dom_family;
5056 	xso->so_timeo = so->so_timeo;
5057 	xso->so_error = so->so_error;
5058 	xso->so_uid = so->so_cred->cr_uid;
5059 	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
5060 	SOCK_LOCK(so);
5061 	xso->so_fibnum = so->so_fibnum;
5062 	if (SOLISTENING(so)) {
5063 		xso->so_qlen = so->sol_qlen;
5064 		xso->so_incqlen = so->sol_incqlen;
5065 		xso->so_qlimit = so->sol_qlimit;
5066 		xso->so_oobmark = 0;
5067 	} else {
5068 		xso->so_state |= so->so_qstate;
5069 		xso->so_qlen = xso->so_incqlen = xso->so_qlimit = 0;
5070 		xso->so_oobmark = so->so_oobmark;
5071 		sbtoxsockbuf(&so->so_snd, &xso->so_snd);
5072 		sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
5073 		if ((so->so_rcv.sb_flags & SB_SPLICED) != 0)
5074 			xso->so_splice_so = (uintptr_t)so->so_splice->dst;
5075 	}
5076 	SOCK_UNLOCK(so);
5077 }
5078 
5079 int
5080 so_options_get(const struct socket *so)
5081 {
5082 
5083 	return (so->so_options);
5084 }
5085 
5086 void
5087 so_options_set(struct socket *so, int val)
5088 {
5089 
5090 	so->so_options = val;
5091 }
5092 
5093 int
5094 so_error_get(const struct socket *so)
5095 {
5096 
5097 	return (so->so_error);
5098 }
5099 
5100 void
5101 so_error_set(struct socket *so, int val)
5102 {
5103 
5104 	so->so_error = val;
5105 }
5106