xref: /freebsd/sys/kern/uipc_usrreq.c (revision c08e016f000ce417a6fb413025ab81ad66cbdfff)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1989, 1991, 1993
5  *	The Regents of the University of California. All Rights Reserved.
6  * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved.
7  * Copyright (c) 2018 Matthew Macy
8  * Copyright (c) 2022 Gleb Smirnoff <glebius@FreeBSD.org>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * UNIX Domain (Local) Sockets
37  *
38  * This is an implementation of UNIX (local) domain sockets.  Each socket has
39  * an associated struct unpcb (UNIX protocol control block).  Stream sockets
40  * may be connected to 0 or 1 other socket.  Datagram sockets may be
41  * connected to 0, 1, or many other sockets.  Sockets may be created and
42  * connected in pairs (socketpair(2)), or bound/connected to using the file
43  * system name space.  For most purposes, only the receive socket buffer is
44  * used, as sending on one socket delivers directly to the receive socket
45  * buffer of a second socket.
46  *
47  * The implementation is substantially complicated by the fact that
48  * "ancillary data", such as file descriptors or credentials, may be passed
49  * across UNIX domain sockets.  The potential for passing UNIX domain sockets
50  * over other UNIX domain sockets requires the implementation of a simple
51  * garbage collector to find and tear down cycles of disconnected sockets.
52  *
53  * TODO:
54  *	RDM
55  *	rethink name space problems
56  *	need a proper out-of-band
57  */
58 
59 #include <sys/cdefs.h>
60 #include "opt_ddb.h"
61 
62 #include <sys/param.h>
63 #include <sys/capsicum.h>
64 #include <sys/domain.h>
65 #include <sys/eventhandler.h>
66 #include <sys/fcntl.h>
67 #include <sys/file.h>
68 #include <sys/filedesc.h>
69 #include <sys/kernel.h>
70 #include <sys/lock.h>
71 #include <sys/malloc.h>
72 #include <sys/mbuf.h>
73 #include <sys/mount.h>
74 #include <sys/mutex.h>
75 #include <sys/namei.h>
76 #include <sys/proc.h>
77 #include <sys/protosw.h>
78 #include <sys/queue.h>
79 #include <sys/resourcevar.h>
80 #include <sys/rwlock.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/signalvar.h>
84 #include <sys/stat.h>
85 #include <sys/sx.h>
86 #include <sys/sysctl.h>
87 #include <sys/systm.h>
88 #include <sys/taskqueue.h>
89 #include <sys/un.h>
90 #include <sys/unpcb.h>
91 #include <sys/vnode.h>
92 
93 #include <net/vnet.h>
94 
95 #ifdef DDB
96 #include <ddb/ddb.h>
97 #endif
98 
99 #include <security/mac/mac_framework.h>
100 
101 #include <vm/uma.h>
102 
103 MALLOC_DECLARE(M_FILECAPS);
104 
105 static struct domain localdomain;
106 
107 static uma_zone_t	unp_zone;
108 static unp_gen_t	unp_gencnt;	/* (l) */
109 static u_int		unp_count;	/* (l) Count of local sockets. */
110 static ino_t		unp_ino;	/* Prototype for fake inode numbers. */
111 static int		unp_rights;	/* (g) File descriptors in flight. */
112 static struct unp_head	unp_shead;	/* (l) List of stream sockets. */
113 static struct unp_head	unp_dhead;	/* (l) List of datagram sockets. */
114 static struct unp_head	unp_sphead;	/* (l) List of seqpacket sockets. */
115 static struct mtx_pool	*unp_vp_mtxpool;
116 
117 struct unp_defer {
118 	SLIST_ENTRY(unp_defer) ud_link;
119 	struct file *ud_fp;
120 };
121 static SLIST_HEAD(, unp_defer) unp_defers;
122 static int unp_defers_count;
123 
124 static const struct sockaddr	sun_noname = {
125 	.sa_len = sizeof(sun_noname),
126 	.sa_family = AF_LOCAL,
127 };
128 
129 /*
130  * Garbage collection of cyclic file descriptor/socket references occurs
131  * asynchronously in a taskqueue context in order to avoid recursion and
132  * reentrance in the UNIX domain socket, file descriptor, and socket layer
133  * code.  See unp_gc() for a full description.
134  */
135 static struct timeout_task unp_gc_task;
136 
137 /*
138  * The close of unix domain sockets attached as SCM_RIGHTS is
139  * postponed to the taskqueue, to avoid arbitrary recursion depth.
140  * The attached sockets might have another sockets attached.
141  */
142 static struct task	unp_defer_task;
143 
144 /*
145  * Both send and receive buffers are allocated PIPSIZ bytes of buffering for
146  * stream sockets, although the total for sender and receiver is actually
147  * only PIPSIZ.
148  *
149  * Datagram sockets really use the sendspace as the maximum datagram size,
150  * and don't really want to reserve the sendspace.  Their recvspace should be
151  * large enough for at least one max-size datagram plus address.
152  */
153 #ifndef PIPSIZ
154 #define	PIPSIZ	8192
155 #endif
156 static u_long	unpst_sendspace = PIPSIZ;
157 static u_long	unpst_recvspace = PIPSIZ;
158 static u_long	unpdg_maxdgram = 8*1024;	/* support 8KB syslog msgs */
159 static u_long	unpdg_recvspace = 16*1024;
160 static u_long	unpsp_sendspace = PIPSIZ;	/* really max datagram size */
161 static u_long	unpsp_recvspace = PIPSIZ;
162 
163 static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
164     "Local domain");
165 static SYSCTL_NODE(_net_local, SOCK_STREAM, stream,
166     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
167     "SOCK_STREAM");
168 static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram,
169     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
170     "SOCK_DGRAM");
171 static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket,
172     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
173     "SOCK_SEQPACKET");
174 
175 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
176 	   &unpst_sendspace, 0, "Default stream send space.");
177 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
178 	   &unpst_recvspace, 0, "Default stream receive space.");
179 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
180 	   &unpdg_maxdgram, 0, "Maximum datagram size.");
181 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
182 	   &unpdg_recvspace, 0, "Default datagram receive space.");
183 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW,
184 	   &unpsp_sendspace, 0, "Default seqpacket send space.");
185 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW,
186 	   &unpsp_recvspace, 0, "Default seqpacket receive space.");
187 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
188     "File descriptors in flight.");
189 SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
190     &unp_defers_count, 0,
191     "File descriptors deferred to taskqueue for close.");
192 
193 /*
194  * Locking and synchronization:
195  *
196  * Several types of locks exist in the local domain socket implementation:
197  * - a global linkage lock
198  * - a global connection list lock
199  * - the mtxpool lock
200  * - per-unpcb mutexes
201  *
202  * The linkage lock protects the global socket lists, the generation number
203  * counter and garbage collector state.
204  *
205  * The connection list lock protects the list of referring sockets in a datagram
206  * socket PCB.  This lock is also overloaded to protect a global list of
207  * sockets whose buffers contain socket references in the form of SCM_RIGHTS
208  * messages.  To avoid recursion, such references are released by a dedicated
209  * thread.
210  *
211  * The mtxpool lock protects the vnode from being modified while referenced.
212  * Lock ordering rules require that it be acquired before any PCB locks.
213  *
214  * The unpcb lock (unp_mtx) protects the most commonly referenced fields in the
215  * unpcb.  This includes the unp_conn field, which either links two connected
216  * PCBs together (for connected socket types) or points at the destination
217  * socket (for connectionless socket types).  The operations of creating or
218  * destroying a connection therefore involve locking multiple PCBs.  To avoid
219  * lock order reversals, in some cases this involves dropping a PCB lock and
220  * using a reference counter to maintain liveness.
221  *
222  * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
223  * allocated in pr_attach() and freed in pr_detach().  The validity of that
224  * pointer is an invariant, so no lock is required to dereference the so_pcb
225  * pointer if a valid socket reference is held by the caller.  In practice,
226  * this is always true during operations performed on a socket.  Each unpcb
227  * has a back-pointer to its socket, unp_socket, which will be stable under
228  * the same circumstances.
229  *
230  * This pointer may only be safely dereferenced as long as a valid reference
231  * to the unpcb is held.  Typically, this reference will be from the socket,
232  * or from another unpcb when the referring unpcb's lock is held (in order
233  * that the reference not be invalidated during use).  For example, to follow
234  * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee
235  * that detach is not run clearing unp_socket.
236  *
237  * Blocking with UNIX domain sockets is a tricky issue: unlike most network
238  * protocols, bind() is a non-atomic operation, and connect() requires
239  * potential sleeping in the protocol, due to potentially waiting on local or
240  * distributed file systems.  We try to separate "lookup" operations, which
241  * may sleep, and the IPC operations themselves, which typically can occur
242  * with relative atomicity as locks can be held over the entire operation.
243  *
244  * Another tricky issue is simultaneous multi-threaded or multi-process
245  * access to a single UNIX domain socket.  These are handled by the flags
246  * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
247  * binding, both of which involve dropping UNIX domain socket locks in order
248  * to perform namei() and other file system operations.
249  */
250 static struct rwlock	unp_link_rwlock;
251 static struct mtx	unp_defers_lock;
252 
253 #define	UNP_LINK_LOCK_INIT()		rw_init(&unp_link_rwlock,	\
254 					    "unp_link_rwlock")
255 
256 #define	UNP_LINK_LOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
257 					    RA_LOCKED)
258 #define	UNP_LINK_UNLOCK_ASSERT()	rw_assert(&unp_link_rwlock,	\
259 					    RA_UNLOCKED)
260 
261 #define	UNP_LINK_RLOCK()		rw_rlock(&unp_link_rwlock)
262 #define	UNP_LINK_RUNLOCK()		rw_runlock(&unp_link_rwlock)
263 #define	UNP_LINK_WLOCK()		rw_wlock(&unp_link_rwlock)
264 #define	UNP_LINK_WUNLOCK()		rw_wunlock(&unp_link_rwlock)
265 #define	UNP_LINK_WLOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
266 					    RA_WLOCKED)
267 #define	UNP_LINK_WOWNED()		rw_wowned(&unp_link_rwlock)
268 
269 #define	UNP_DEFERRED_LOCK_INIT()	mtx_init(&unp_defers_lock, \
270 					    "unp_defer", NULL, MTX_DEF)
271 #define	UNP_DEFERRED_LOCK()		mtx_lock(&unp_defers_lock)
272 #define	UNP_DEFERRED_UNLOCK()		mtx_unlock(&unp_defers_lock)
273 
274 #define UNP_REF_LIST_LOCK()		UNP_DEFERRED_LOCK();
275 #define UNP_REF_LIST_UNLOCK()		UNP_DEFERRED_UNLOCK();
276 
277 #define UNP_PCB_LOCK_INIT(unp)		mtx_init(&(unp)->unp_mtx,	\
278 					    "unp", "unp",	\
279 					    MTX_DUPOK|MTX_DEF)
280 #define	UNP_PCB_LOCK_DESTROY(unp)	mtx_destroy(&(unp)->unp_mtx)
281 #define	UNP_PCB_LOCKPTR(unp)		(&(unp)->unp_mtx)
282 #define	UNP_PCB_LOCK(unp)		mtx_lock(&(unp)->unp_mtx)
283 #define	UNP_PCB_TRYLOCK(unp)		mtx_trylock(&(unp)->unp_mtx)
284 #define	UNP_PCB_UNLOCK(unp)		mtx_unlock(&(unp)->unp_mtx)
285 #define	UNP_PCB_OWNED(unp)		mtx_owned(&(unp)->unp_mtx)
286 #define	UNP_PCB_LOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_OWNED)
287 #define	UNP_PCB_UNLOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED)
288 
289 static int	uipc_connect2(struct socket *, struct socket *);
290 static int	uipc_ctloutput(struct socket *, struct sockopt *);
291 static int	unp_connect(struct socket *, struct sockaddr *,
292 		    struct thread *);
293 static int	unp_connectat(int, struct socket *, struct sockaddr *,
294 		    struct thread *, bool);
295 static void	unp_connect2(struct socket *so, struct socket *so2);
296 static void	unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
297 static void	unp_dispose(struct socket *so);
298 static void	unp_shutdown(struct unpcb *);
299 static void	unp_drop(struct unpcb *);
300 static void	unp_gc(__unused void *, int);
301 static void	unp_scan(struct mbuf *, void (*)(struct filedescent **, int));
302 static void	unp_discard(struct file *);
303 static void	unp_freerights(struct filedescent **, int);
304 static int	unp_internalize(struct mbuf **, struct thread *,
305 		    struct mbuf **, u_int *, u_int *);
306 static void	unp_internalize_fp(struct file *);
307 static int	unp_externalize(struct mbuf *, struct mbuf **, int);
308 static int	unp_externalize_fp(struct file *);
309 static struct mbuf	*unp_addsockcred(struct thread *, struct mbuf *,
310 		    int, struct mbuf **, u_int *, u_int *);
311 static void	unp_process_defers(void * __unused, int);
312 
313 static void
314 unp_pcb_hold(struct unpcb *unp)
315 {
316 	u_int old __unused;
317 
318 	old = refcount_acquire(&unp->unp_refcount);
319 	KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp));
320 }
321 
322 static __result_use_check bool
323 unp_pcb_rele(struct unpcb *unp)
324 {
325 	bool ret;
326 
327 	UNP_PCB_LOCK_ASSERT(unp);
328 
329 	if ((ret = refcount_release(&unp->unp_refcount))) {
330 		UNP_PCB_UNLOCK(unp);
331 		UNP_PCB_LOCK_DESTROY(unp);
332 		uma_zfree(unp_zone, unp);
333 	}
334 	return (ret);
335 }
336 
337 static void
338 unp_pcb_rele_notlast(struct unpcb *unp)
339 {
340 	bool ret __unused;
341 
342 	ret = refcount_release(&unp->unp_refcount);
343 	KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp));
344 }
345 
346 static void
347 unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2)
348 {
349 	UNP_PCB_UNLOCK_ASSERT(unp);
350 	UNP_PCB_UNLOCK_ASSERT(unp2);
351 
352 	if (unp == unp2) {
353 		UNP_PCB_LOCK(unp);
354 	} else if ((uintptr_t)unp2 > (uintptr_t)unp) {
355 		UNP_PCB_LOCK(unp);
356 		UNP_PCB_LOCK(unp2);
357 	} else {
358 		UNP_PCB_LOCK(unp2);
359 		UNP_PCB_LOCK(unp);
360 	}
361 }
362 
363 static void
364 unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2)
365 {
366 	UNP_PCB_UNLOCK(unp);
367 	if (unp != unp2)
368 		UNP_PCB_UNLOCK(unp2);
369 }
370 
371 /*
372  * Try to lock the connected peer of an already locked socket.  In some cases
373  * this requires that we unlock the current socket.  The pairbusy counter is
374  * used to block concurrent connection attempts while the lock is dropped.  The
375  * caller must be careful to revalidate PCB state.
376  */
377 static struct unpcb *
378 unp_pcb_lock_peer(struct unpcb *unp)
379 {
380 	struct unpcb *unp2;
381 
382 	UNP_PCB_LOCK_ASSERT(unp);
383 	unp2 = unp->unp_conn;
384 	if (unp2 == NULL)
385 		return (NULL);
386 	if (__predict_false(unp == unp2))
387 		return (unp);
388 
389 	UNP_PCB_UNLOCK_ASSERT(unp2);
390 
391 	if (__predict_true(UNP_PCB_TRYLOCK(unp2)))
392 		return (unp2);
393 	if ((uintptr_t)unp2 > (uintptr_t)unp) {
394 		UNP_PCB_LOCK(unp2);
395 		return (unp2);
396 	}
397 	unp->unp_pairbusy++;
398 	unp_pcb_hold(unp2);
399 	UNP_PCB_UNLOCK(unp);
400 
401 	UNP_PCB_LOCK(unp2);
402 	UNP_PCB_LOCK(unp);
403 	KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL,
404 	    ("%s: socket %p was reconnected", __func__, unp));
405 	if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) {
406 		unp->unp_flags &= ~UNP_WAITING;
407 		wakeup(unp);
408 	}
409 	if (unp_pcb_rele(unp2)) {
410 		/* unp2 is unlocked. */
411 		return (NULL);
412 	}
413 	if (unp->unp_conn == NULL) {
414 		UNP_PCB_UNLOCK(unp2);
415 		return (NULL);
416 	}
417 	return (unp2);
418 }
419 
420 static void
421 uipc_abort(struct socket *so)
422 {
423 	struct unpcb *unp, *unp2;
424 
425 	unp = sotounpcb(so);
426 	KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
427 	UNP_PCB_UNLOCK_ASSERT(unp);
428 
429 	UNP_PCB_LOCK(unp);
430 	unp2 = unp->unp_conn;
431 	if (unp2 != NULL) {
432 		unp_pcb_hold(unp2);
433 		UNP_PCB_UNLOCK(unp);
434 		unp_drop(unp2);
435 	} else
436 		UNP_PCB_UNLOCK(unp);
437 }
438 
439 static int
440 uipc_attach(struct socket *so, int proto, struct thread *td)
441 {
442 	u_long sendspace, recvspace;
443 	struct unpcb *unp;
444 	int error;
445 	bool locked;
446 
447 	KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
448 	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
449 		switch (so->so_type) {
450 		case SOCK_STREAM:
451 			sendspace = unpst_sendspace;
452 			recvspace = unpst_recvspace;
453 			break;
454 
455 		case SOCK_DGRAM:
456 			STAILQ_INIT(&so->so_rcv.uxdg_mb);
457 			STAILQ_INIT(&so->so_snd.uxdg_mb);
458 			TAILQ_INIT(&so->so_rcv.uxdg_conns);
459 			/*
460 			 * Since send buffer is either bypassed or is a part
461 			 * of one-to-many receive buffer, we assign both space
462 			 * limits to unpdg_recvspace.
463 			 */
464 			sendspace = recvspace = unpdg_recvspace;
465 			break;
466 
467 		case SOCK_SEQPACKET:
468 			sendspace = unpsp_sendspace;
469 			recvspace = unpsp_recvspace;
470 			break;
471 
472 		default:
473 			panic("uipc_attach");
474 		}
475 		error = soreserve(so, sendspace, recvspace);
476 		if (error)
477 			return (error);
478 	}
479 	unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
480 	if (unp == NULL)
481 		return (ENOBUFS);
482 	LIST_INIT(&unp->unp_refs);
483 	UNP_PCB_LOCK_INIT(unp);
484 	unp->unp_socket = so;
485 	so->so_pcb = unp;
486 	refcount_init(&unp->unp_refcount, 1);
487 
488 	if ((locked = UNP_LINK_WOWNED()) == false)
489 		UNP_LINK_WLOCK();
490 
491 	unp->unp_gencnt = ++unp_gencnt;
492 	unp->unp_ino = ++unp_ino;
493 	unp_count++;
494 	switch (so->so_type) {
495 	case SOCK_STREAM:
496 		LIST_INSERT_HEAD(&unp_shead, unp, unp_link);
497 		break;
498 
499 	case SOCK_DGRAM:
500 		LIST_INSERT_HEAD(&unp_dhead, unp, unp_link);
501 		break;
502 
503 	case SOCK_SEQPACKET:
504 		LIST_INSERT_HEAD(&unp_sphead, unp, unp_link);
505 		break;
506 
507 	default:
508 		panic("uipc_attach");
509 	}
510 
511 	if (locked == false)
512 		UNP_LINK_WUNLOCK();
513 
514 	return (0);
515 }
516 
517 static int
518 uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
519 {
520 	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
521 	struct vattr vattr;
522 	int error, namelen;
523 	struct nameidata nd;
524 	struct unpcb *unp;
525 	struct vnode *vp;
526 	struct mount *mp;
527 	cap_rights_t rights;
528 	char *buf;
529 
530 	if (nam->sa_family != AF_UNIX)
531 		return (EAFNOSUPPORT);
532 
533 	unp = sotounpcb(so);
534 	KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));
535 
536 	if (soun->sun_len > sizeof(struct sockaddr_un))
537 		return (EINVAL);
538 	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
539 	if (namelen <= 0)
540 		return (EINVAL);
541 
542 	/*
543 	 * We don't allow simultaneous bind() calls on a single UNIX domain
544 	 * socket, so flag in-progress operations, and return an error if an
545 	 * operation is already in progress.
546 	 *
547 	 * Historically, we have not allowed a socket to be rebound, so this
548 	 * also returns an error.  Not allowing re-binding simplifies the
549 	 * implementation and avoids a great many possible failure modes.
550 	 */
551 	UNP_PCB_LOCK(unp);
552 	if (unp->unp_vnode != NULL) {
553 		UNP_PCB_UNLOCK(unp);
554 		return (EINVAL);
555 	}
556 	if (unp->unp_flags & UNP_BINDING) {
557 		UNP_PCB_UNLOCK(unp);
558 		return (EALREADY);
559 	}
560 	unp->unp_flags |= UNP_BINDING;
561 	UNP_PCB_UNLOCK(unp);
562 
563 	buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
564 	bcopy(soun->sun_path, buf, namelen);
565 	buf[namelen] = 0;
566 
567 restart:
568 	NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | NOCACHE,
569 	    UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT));
570 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
571 	error = namei(&nd);
572 	if (error)
573 		goto error;
574 	vp = nd.ni_vp;
575 	if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
576 		NDFREE_PNBUF(&nd);
577 		if (nd.ni_dvp == vp)
578 			vrele(nd.ni_dvp);
579 		else
580 			vput(nd.ni_dvp);
581 		if (vp != NULL) {
582 			vrele(vp);
583 			error = EADDRINUSE;
584 			goto error;
585 		}
586 		error = vn_start_write(NULL, &mp, V_XSLEEP | V_PCATCH);
587 		if (error)
588 			goto error;
589 		goto restart;
590 	}
591 	VATTR_NULL(&vattr);
592 	vattr.va_type = VSOCK;
593 	vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_pd->pd_cmask);
594 #ifdef MAC
595 	error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
596 	    &vattr);
597 #endif
598 	if (error == 0) {
599 		/*
600 		 * The prior lookup may have left LK_SHARED in cn_lkflags,
601 		 * and VOP_CREATE technically only requires the new vnode to
602 		 * be locked shared. Most filesystems will return the new vnode
603 		 * locked exclusive regardless, but we should explicitly
604 		 * specify that here since we require it and assert to that
605 		 * effect below.
606 		 */
607 		nd.ni_cnd.cn_lkflags = (nd.ni_cnd.cn_lkflags & ~LK_SHARED) |
608 		    LK_EXCLUSIVE;
609 		error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
610 	}
611 	NDFREE_PNBUF(&nd);
612 	if (error) {
613 		VOP_VPUT_PAIR(nd.ni_dvp, NULL, true);
614 		vn_finished_write(mp);
615 		if (error == ERELOOKUP)
616 			goto restart;
617 		goto error;
618 	}
619 	vp = nd.ni_vp;
620 	ASSERT_VOP_ELOCKED(vp, "uipc_bind");
621 	soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);
622 
623 	UNP_PCB_LOCK(unp);
624 	VOP_UNP_BIND(vp, unp);
625 	unp->unp_vnode = vp;
626 	unp->unp_addr = soun;
627 	unp->unp_flags &= ~UNP_BINDING;
628 	UNP_PCB_UNLOCK(unp);
629 	vref(vp);
630 	VOP_VPUT_PAIR(nd.ni_dvp, &vp, true);
631 	vn_finished_write(mp);
632 	free(buf, M_TEMP);
633 	return (0);
634 
635 error:
636 	UNP_PCB_LOCK(unp);
637 	unp->unp_flags &= ~UNP_BINDING;
638 	UNP_PCB_UNLOCK(unp);
639 	free(buf, M_TEMP);
640 	return (error);
641 }
642 
643 static int
644 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
645 {
646 
647 	return (uipc_bindat(AT_FDCWD, so, nam, td));
648 }
649 
650 static int
651 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
652 {
653 	int error;
654 
655 	KASSERT(td == curthread, ("uipc_connect: td != curthread"));
656 	error = unp_connect(so, nam, td);
657 	return (error);
658 }
659 
660 static int
661 uipc_connectat(int fd, struct socket *so, struct sockaddr *nam,
662     struct thread *td)
663 {
664 	int error;
665 
666 	KASSERT(td == curthread, ("uipc_connectat: td != curthread"));
667 	error = unp_connectat(fd, so, nam, td, false);
668 	return (error);
669 }
670 
671 static void
672 uipc_close(struct socket *so)
673 {
674 	struct unpcb *unp, *unp2;
675 	struct vnode *vp = NULL;
676 	struct mtx *vplock;
677 
678 	unp = sotounpcb(so);
679 	KASSERT(unp != NULL, ("uipc_close: unp == NULL"));
680 
681 	vplock = NULL;
682 	if ((vp = unp->unp_vnode) != NULL) {
683 		vplock = mtx_pool_find(unp_vp_mtxpool, vp);
684 		mtx_lock(vplock);
685 	}
686 	UNP_PCB_LOCK(unp);
687 	if (vp && unp->unp_vnode == NULL) {
688 		mtx_unlock(vplock);
689 		vp = NULL;
690 	}
691 	if (vp != NULL) {
692 		VOP_UNP_DETACH(vp);
693 		unp->unp_vnode = NULL;
694 	}
695 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
696 		unp_disconnect(unp, unp2);
697 	else
698 		UNP_PCB_UNLOCK(unp);
699 	if (vp) {
700 		mtx_unlock(vplock);
701 		vrele(vp);
702 	}
703 }
704 
705 static int
706 uipc_connect2(struct socket *so1, struct socket *so2)
707 {
708 	struct unpcb *unp, *unp2;
709 
710 	if (so1->so_type != so2->so_type)
711 		return (EPROTOTYPE);
712 
713 	unp = so1->so_pcb;
714 	KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
715 	unp2 = so2->so_pcb;
716 	KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
717 	unp_pcb_lock_pair(unp, unp2);
718 	unp_connect2(so1, so2);
719 	unp_pcb_unlock_pair(unp, unp2);
720 
721 	return (0);
722 }
723 
724 static void
725 uipc_detach(struct socket *so)
726 {
727 	struct unpcb *unp, *unp2;
728 	struct mtx *vplock;
729 	struct vnode *vp;
730 	int local_unp_rights;
731 
732 	unp = sotounpcb(so);
733 	KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));
734 
735 	vp = NULL;
736 	vplock = NULL;
737 
738 	if (!SOLISTENING(so))
739 		unp_dispose(so);
740 
741 	UNP_LINK_WLOCK();
742 	LIST_REMOVE(unp, unp_link);
743 	if (unp->unp_gcflag & UNPGC_DEAD)
744 		LIST_REMOVE(unp, unp_dead);
745 	unp->unp_gencnt = ++unp_gencnt;
746 	--unp_count;
747 	UNP_LINK_WUNLOCK();
748 
749 	UNP_PCB_UNLOCK_ASSERT(unp);
750  restart:
751 	if ((vp = unp->unp_vnode) != NULL) {
752 		vplock = mtx_pool_find(unp_vp_mtxpool, vp);
753 		mtx_lock(vplock);
754 	}
755 	UNP_PCB_LOCK(unp);
756 	if (unp->unp_vnode != vp && unp->unp_vnode != NULL) {
757 		if (vplock)
758 			mtx_unlock(vplock);
759 		UNP_PCB_UNLOCK(unp);
760 		goto restart;
761 	}
762 	if ((vp = unp->unp_vnode) != NULL) {
763 		VOP_UNP_DETACH(vp);
764 		unp->unp_vnode = NULL;
765 	}
766 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
767 		unp_disconnect(unp, unp2);
768 	else
769 		UNP_PCB_UNLOCK(unp);
770 
771 	UNP_REF_LIST_LOCK();
772 	while (!LIST_EMPTY(&unp->unp_refs)) {
773 		struct unpcb *ref = LIST_FIRST(&unp->unp_refs);
774 
775 		unp_pcb_hold(ref);
776 		UNP_REF_LIST_UNLOCK();
777 
778 		MPASS(ref != unp);
779 		UNP_PCB_UNLOCK_ASSERT(ref);
780 		unp_drop(ref);
781 		UNP_REF_LIST_LOCK();
782 	}
783 	UNP_REF_LIST_UNLOCK();
784 
785 	UNP_PCB_LOCK(unp);
786 	local_unp_rights = unp_rights;
787 	unp->unp_socket->so_pcb = NULL;
788 	unp->unp_socket = NULL;
789 	free(unp->unp_addr, M_SONAME);
790 	unp->unp_addr = NULL;
791 	if (!unp_pcb_rele(unp))
792 		UNP_PCB_UNLOCK(unp);
793 	if (vp) {
794 		mtx_unlock(vplock);
795 		vrele(vp);
796 	}
797 	if (local_unp_rights)
798 		taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1);
799 
800 	switch (so->so_type) {
801 	case SOCK_DGRAM:
802 		/*
803 		 * Everything should have been unlinked/freed by unp_dispose()
804 		 * and/or unp_disconnect().
805 		 */
806 		MPASS(so->so_rcv.uxdg_peeked == NULL);
807 		MPASS(STAILQ_EMPTY(&so->so_rcv.uxdg_mb));
808 		MPASS(TAILQ_EMPTY(&so->so_rcv.uxdg_conns));
809 		MPASS(STAILQ_EMPTY(&so->so_snd.uxdg_mb));
810 	}
811 }
812 
813 static int
814 uipc_disconnect(struct socket *so)
815 {
816 	struct unpcb *unp, *unp2;
817 
818 	unp = sotounpcb(so);
819 	KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));
820 
821 	UNP_PCB_LOCK(unp);
822 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
823 		unp_disconnect(unp, unp2);
824 	else
825 		UNP_PCB_UNLOCK(unp);
826 	return (0);
827 }
828 
829 static int
830 uipc_listen(struct socket *so, int backlog, struct thread *td)
831 {
832 	struct unpcb *unp;
833 	int error;
834 
835 	MPASS(so->so_type != SOCK_DGRAM);
836 
837 	/*
838 	 * Synchronize with concurrent connection attempts.
839 	 */
840 	error = 0;
841 	unp = sotounpcb(so);
842 	UNP_PCB_LOCK(unp);
843 	if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0)
844 		error = EINVAL;
845 	else if (unp->unp_vnode == NULL)
846 		error = EDESTADDRREQ;
847 	if (error != 0) {
848 		UNP_PCB_UNLOCK(unp);
849 		return (error);
850 	}
851 
852 	SOCK_LOCK(so);
853 	error = solisten_proto_check(so);
854 	if (error == 0) {
855 		cru2xt(td, &unp->unp_peercred);
856 		solisten_proto(so, backlog);
857 	}
858 	SOCK_UNLOCK(so);
859 	UNP_PCB_UNLOCK(unp);
860 	return (error);
861 }
862 
863 static int
864 uipc_peeraddr(struct socket *so, struct sockaddr *ret)
865 {
866 	struct unpcb *unp, *unp2;
867 	const struct sockaddr *sa;
868 
869 	unp = sotounpcb(so);
870 	KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));
871 
872 	UNP_PCB_LOCK(unp);
873 	unp2 = unp_pcb_lock_peer(unp);
874 	if (unp2 != NULL) {
875 		if (unp2->unp_addr != NULL)
876 			sa = (struct sockaddr *)unp2->unp_addr;
877 		else
878 			sa = &sun_noname;
879 		bcopy(sa, ret, sa->sa_len);
880 		unp_pcb_unlock_pair(unp, unp2);
881 	} else {
882 		UNP_PCB_UNLOCK(unp);
883 		sa = &sun_noname;
884 		bcopy(sa, ret, sa->sa_len);
885 	}
886 	return (0);
887 }
888 
889 static int
890 uipc_rcvd(struct socket *so, int flags)
891 {
892 	struct unpcb *unp, *unp2;
893 	struct socket *so2;
894 	u_int mbcnt, sbcc;
895 
896 	unp = sotounpcb(so);
897 	KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
898 	KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
899 	    ("%s: socktype %d", __func__, so->so_type));
900 
901 	/*
902 	 * Adjust backpressure on sender and wakeup any waiting to write.
903 	 *
904 	 * The unp lock is acquired to maintain the validity of the unp_conn
905 	 * pointer; no lock on unp2 is required as unp2->unp_socket will be
906 	 * static as long as we don't permit unp2 to disconnect from unp,
907 	 * which is prevented by the lock on unp.  We cache values from
908 	 * so_rcv to avoid holding the so_rcv lock over the entire
909 	 * transaction on the remote so_snd.
910 	 */
911 	SOCKBUF_LOCK(&so->so_rcv);
912 	mbcnt = so->so_rcv.sb_mbcnt;
913 	sbcc = sbavail(&so->so_rcv);
914 	SOCKBUF_UNLOCK(&so->so_rcv);
915 	/*
916 	 * There is a benign race condition at this point.  If we're planning to
917 	 * clear SB_STOP, but uipc_send is called on the connected socket at
918 	 * this instant, it might add data to the sockbuf and set SB_STOP.  Then
919 	 * we would erroneously clear SB_STOP below, even though the sockbuf is
920 	 * full.  The race is benign because the only ill effect is to allow the
921 	 * sockbuf to exceed its size limit, and the size limits are not
922 	 * strictly guaranteed anyway.
923 	 */
924 	UNP_PCB_LOCK(unp);
925 	unp2 = unp->unp_conn;
926 	if (unp2 == NULL) {
927 		UNP_PCB_UNLOCK(unp);
928 		return (0);
929 	}
930 	so2 = unp2->unp_socket;
931 	SOCKBUF_LOCK(&so2->so_snd);
932 	if (sbcc < so2->so_snd.sb_hiwat && mbcnt < so2->so_snd.sb_mbmax)
933 		so2->so_snd.sb_flags &= ~SB_STOP;
934 	sowwakeup_locked(so2);
935 	UNP_PCB_UNLOCK(unp);
936 	return (0);
937 }
938 
939 static int
940 uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
941     struct mbuf *control, struct thread *td)
942 {
943 	struct unpcb *unp, *unp2;
944 	struct socket *so2;
945 	u_int mbcnt, sbcc;
946 	int error;
947 
948 	unp = sotounpcb(so);
949 	KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
950 	KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
951 	    ("%s: socktype %d", __func__, so->so_type));
952 
953 	error = 0;
954 	if (flags & PRUS_OOB) {
955 		error = EOPNOTSUPP;
956 		goto release;
957 	}
958 	if (control != NULL &&
959 	    (error = unp_internalize(&control, td, NULL, NULL, NULL)))
960 		goto release;
961 
962 	unp2 = NULL;
963 	if ((so->so_state & SS_ISCONNECTED) == 0) {
964 		if (nam != NULL) {
965 			if ((error = unp_connect(so, nam, td)) != 0)
966 				goto out;
967 		} else {
968 			error = ENOTCONN;
969 			goto out;
970 		}
971 	}
972 
973 	UNP_PCB_LOCK(unp);
974 	if ((unp2 = unp_pcb_lock_peer(unp)) == NULL) {
975 		UNP_PCB_UNLOCK(unp);
976 		error = ENOTCONN;
977 		goto out;
978 	} else if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
979 		unp_pcb_unlock_pair(unp, unp2);
980 		error = EPIPE;
981 		goto out;
982 	}
983 	UNP_PCB_UNLOCK(unp);
984 	if ((so2 = unp2->unp_socket) == NULL) {
985 		UNP_PCB_UNLOCK(unp2);
986 		error = ENOTCONN;
987 		goto out;
988 	}
989 	SOCKBUF_LOCK(&so2->so_rcv);
990 	if (unp2->unp_flags & UNP_WANTCRED_MASK) {
991 		/*
992 		 * Credentials are passed only once on SOCK_STREAM and
993 		 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or
994 		 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS).
995 		 */
996 		control = unp_addsockcred(td, control, unp2->unp_flags, NULL,
997 		    NULL, NULL);
998 		unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT;
999 	}
1000 
1001 	/*
1002 	 * Send to paired receive port and wake up readers.  Don't
1003 	 * check for space available in the receive buffer if we're
1004 	 * attaching ancillary data; Unix domain sockets only check
1005 	 * for space in the sending sockbuf, and that check is
1006 	 * performed one level up the stack.  At that level we cannot
1007 	 * precisely account for the amount of buffer space used
1008 	 * (e.g., because control messages are not yet internalized).
1009 	 */
1010 	switch (so->so_type) {
1011 	case SOCK_STREAM:
1012 		if (control != NULL) {
1013 			sbappendcontrol_locked(&so2->so_rcv,
1014 			    m->m_len > 0 ?  m : NULL, control, flags);
1015 			control = NULL;
1016 		} else
1017 			sbappend_locked(&so2->so_rcv, m, flags);
1018 		break;
1019 
1020 	case SOCK_SEQPACKET:
1021 		if (sbappendaddr_nospacecheck_locked(&so2->so_rcv,
1022 		    &sun_noname, m, control))
1023 			control = NULL;
1024 		break;
1025 	}
1026 
1027 	mbcnt = so2->so_rcv.sb_mbcnt;
1028 	sbcc = sbavail(&so2->so_rcv);
1029 	if (sbcc)
1030 		sorwakeup_locked(so2);
1031 	else
1032 		SOCKBUF_UNLOCK(&so2->so_rcv);
1033 
1034 	/*
1035 	 * The PCB lock on unp2 protects the SB_STOP flag.  Without it,
1036 	 * it would be possible for uipc_rcvd to be called at this
1037 	 * point, drain the receiving sockbuf, clear SB_STOP, and then
1038 	 * we would set SB_STOP below.  That could lead to an empty
1039 	 * sockbuf having SB_STOP set
1040 	 */
1041 	SOCKBUF_LOCK(&so->so_snd);
1042 	if (sbcc >= so->so_snd.sb_hiwat || mbcnt >= so->so_snd.sb_mbmax)
1043 		so->so_snd.sb_flags |= SB_STOP;
1044 	SOCKBUF_UNLOCK(&so->so_snd);
1045 	UNP_PCB_UNLOCK(unp2);
1046 	m = NULL;
1047 out:
1048 	/*
1049 	 * PRUS_EOF is equivalent to pr_send followed by pr_shutdown.
1050 	 */
1051 	if (flags & PRUS_EOF) {
1052 		UNP_PCB_LOCK(unp);
1053 		socantsendmore(so);
1054 		unp_shutdown(unp);
1055 		UNP_PCB_UNLOCK(unp);
1056 	}
1057 	if (control != NULL && error != 0)
1058 		unp_scan(control, unp_freerights);
1059 
1060 release:
1061 	if (control != NULL)
1062 		m_freem(control);
1063 	/*
1064 	 * In case of PRUS_NOTREADY, uipc_ready() is responsible
1065 	 * for freeing memory.
1066 	 */
1067 	if (m != NULL && (flags & PRUS_NOTREADY) == 0)
1068 		m_freem(m);
1069 	return (error);
1070 }
1071 
1072 /* PF_UNIX/SOCK_DGRAM version of sbspace() */
1073 static inline bool
1074 uipc_dgram_sbspace(struct sockbuf *sb, u_int cc, u_int mbcnt)
1075 {
1076 	u_int bleft, mleft;
1077 
1078 	/*
1079 	 * Negative space may happen if send(2) is followed by
1080 	 * setsockopt(SO_SNDBUF/SO_RCVBUF) that shrinks maximum.
1081 	 */
1082 	if (__predict_false(sb->sb_hiwat < sb->uxdg_cc ||
1083 	    sb->sb_mbmax < sb->uxdg_mbcnt))
1084 		return (false);
1085 
1086 	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE))
1087 		return (false);
1088 
1089 	bleft = sb->sb_hiwat - sb->uxdg_cc;
1090 	mleft = sb->sb_mbmax - sb->uxdg_mbcnt;
1091 
1092 	return (bleft >= cc && mleft >= mbcnt);
1093 }
1094 
1095 /*
1096  * PF_UNIX/SOCK_DGRAM send
1097  *
1098  * Allocate a record consisting of 3 mbufs in the sequence of
1099  * from -> control -> data and append it to the socket buffer.
1100  *
1101  * The first mbuf carries sender's name and is a pkthdr that stores
1102  * overall length of datagram, its memory consumption and control length.
1103  */
1104 #define	ctllen	PH_loc.thirtytwo[1]
1105 _Static_assert(offsetof(struct pkthdr, memlen) + sizeof(u_int) <=
1106     offsetof(struct pkthdr, ctllen), "unix/dgram can not store ctllen");
1107 static int
1108 uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
1109     struct mbuf *m, struct mbuf *c, int flags, struct thread *td)
1110 {
1111 	struct unpcb *unp, *unp2;
1112 	const struct sockaddr *from;
1113 	struct socket *so2;
1114 	struct sockbuf *sb;
1115 	struct mbuf *f, *clast;
1116 	u_int cc, ctl, mbcnt;
1117 	u_int dcc __diagused, dctl __diagused, dmbcnt __diagused;
1118 	int error;
1119 
1120 	MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL));
1121 
1122 	error = 0;
1123 	f = NULL;
1124 	ctl = 0;
1125 
1126 	if (__predict_false(flags & MSG_OOB)) {
1127 		error = EOPNOTSUPP;
1128 		goto out;
1129 	}
1130 	if (m == NULL) {
1131 		if (__predict_false(uio->uio_resid > unpdg_maxdgram)) {
1132 			error = EMSGSIZE;
1133 			goto out;
1134 		}
1135 		m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR);
1136 		if (__predict_false(m == NULL)) {
1137 			error = EFAULT;
1138 			goto out;
1139 		}
1140 		f = m_gethdr(M_WAITOK, MT_SONAME);
1141 		cc = m->m_pkthdr.len;
1142 		mbcnt = MSIZE + m->m_pkthdr.memlen;
1143 		if (c != NULL &&
1144 		    (error = unp_internalize(&c, td, &clast, &ctl, &mbcnt)))
1145 			goto out;
1146 	} else {
1147 		/* pr_sosend() with mbuf usually is a kernel thread. */
1148 
1149 		M_ASSERTPKTHDR(m);
1150 		if (__predict_false(c != NULL))
1151 			panic("%s: control from a kernel thread", __func__);
1152 
1153 		if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) {
1154 			error = EMSGSIZE;
1155 			goto out;
1156 		}
1157 		if ((f = m_gethdr(M_NOWAIT, MT_SONAME)) == NULL) {
1158 			error = ENOBUFS;
1159 			goto out;
1160 		}
1161 		/* Condition the foreign mbuf to our standards. */
1162 		m_clrprotoflags(m);
1163 		m_tag_delete_chain(m, NULL);
1164 		m->m_pkthdr.rcvif = NULL;
1165 		m->m_pkthdr.flowid = 0;
1166 		m->m_pkthdr.csum_flags = 0;
1167 		m->m_pkthdr.fibnum = 0;
1168 		m->m_pkthdr.rsstype = 0;
1169 
1170 		cc = m->m_pkthdr.len;
1171 		mbcnt = MSIZE;
1172 		for (struct mbuf *mb = m; mb != NULL; mb = mb->m_next) {
1173 			mbcnt += MSIZE;
1174 			if (mb->m_flags & M_EXT)
1175 				mbcnt += mb->m_ext.ext_size;
1176 		}
1177 	}
1178 
1179 	unp = sotounpcb(so);
1180 	MPASS(unp);
1181 
1182 	/*
1183 	 * XXXGL: would be cool to fully remove so_snd out of the equation
1184 	 * and avoid this lock, which is not only extraneous, but also being
1185 	 * released, thus still leaving possibility for a race.  We can easily
1186 	 * handle SBS_CANTSENDMORE/SS_ISCONNECTED complement in unpcb, but it
1187 	 * is more difficult to invent something to handle so_error.
1188 	 */
1189 	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
1190 	if (error)
1191 		goto out2;
1192 	SOCK_SENDBUF_LOCK(so);
1193 	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1194 		SOCK_SENDBUF_UNLOCK(so);
1195 		error = EPIPE;
1196 		goto out3;
1197 	}
1198 	if (so->so_error != 0) {
1199 		error = so->so_error;
1200 		so->so_error = 0;
1201 		SOCK_SENDBUF_UNLOCK(so);
1202 		goto out3;
1203 	}
1204 	if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) {
1205 		SOCK_SENDBUF_UNLOCK(so);
1206 		error = EDESTADDRREQ;
1207 		goto out3;
1208 	}
1209 	SOCK_SENDBUF_UNLOCK(so);
1210 
1211 	if (addr != NULL) {
1212 		if ((error = unp_connectat(AT_FDCWD, so, addr, td, true)))
1213 			goto out3;
1214 		UNP_PCB_LOCK_ASSERT(unp);
1215 		unp2 = unp->unp_conn;
1216 		UNP_PCB_LOCK_ASSERT(unp2);
1217 	} else {
1218 		UNP_PCB_LOCK(unp);
1219 		unp2 = unp_pcb_lock_peer(unp);
1220 		if (unp2 == NULL) {
1221 			UNP_PCB_UNLOCK(unp);
1222 			error = ENOTCONN;
1223 			goto out3;
1224 		}
1225 	}
1226 
1227 	if (unp2->unp_flags & UNP_WANTCRED_MASK)
1228 		c = unp_addsockcred(td, c, unp2->unp_flags, &clast, &ctl,
1229 		    &mbcnt);
1230 	if (unp->unp_addr != NULL)
1231 		from = (struct sockaddr *)unp->unp_addr;
1232 	else
1233 		from = &sun_noname;
1234 	f->m_len = from->sa_len;
1235 	MPASS(from->sa_len <= MLEN);
1236 	bcopy(from, mtod(f, void *), from->sa_len);
1237 	ctl += f->m_len;
1238 
1239 	/*
1240 	 * Concatenate mbufs: from -> control -> data.
1241 	 * Save overall cc and mbcnt in "from" mbuf.
1242 	 */
1243 	if (c != NULL) {
1244 #ifdef INVARIANTS
1245 		struct mbuf *mc;
1246 
1247 		for (mc = c; mc->m_next != NULL; mc = mc->m_next);
1248 		MPASS(mc == clast);
1249 #endif
1250 		f->m_next = c;
1251 		clast->m_next = m;
1252 		c = NULL;
1253 	} else
1254 		f->m_next = m;
1255 	m = NULL;
1256 #ifdef INVARIANTS
1257 	dcc = dctl = dmbcnt = 0;
1258 	for (struct mbuf *mb = f; mb != NULL; mb = mb->m_next) {
1259 		if (mb->m_type == MT_DATA)
1260 			dcc += mb->m_len;
1261 		else
1262 			dctl += mb->m_len;
1263 		dmbcnt += MSIZE;
1264 		if (mb->m_flags & M_EXT)
1265 			dmbcnt += mb->m_ext.ext_size;
1266 	}
1267 	MPASS(dcc == cc);
1268 	MPASS(dctl == ctl);
1269 	MPASS(dmbcnt == mbcnt);
1270 #endif
1271 	f->m_pkthdr.len = cc + ctl;
1272 	f->m_pkthdr.memlen = mbcnt;
1273 	f->m_pkthdr.ctllen = ctl;
1274 
1275 	/*
1276 	 * Destination socket buffer selection.
1277 	 *
1278 	 * Unconnected sends, when !(so->so_state & SS_ISCONNECTED) and the
1279 	 * destination address is supplied, create a temporary connection for
1280 	 * the run time of the function (see call to unp_connectat() above and
1281 	 * to unp_disconnect() below).  We distinguish them by condition of
1282 	 * (addr != NULL).  We intentionally avoid adding 'bool connected' for
1283 	 * that condition, since, again, through the run time of this code we
1284 	 * are always connected.  For such "unconnected" sends, the destination
1285 	 * buffer would be the receive buffer of destination socket so2.
1286 	 *
1287 	 * For connected sends, data lands on the send buffer of the sender's
1288 	 * socket "so".  Then, if we just added the very first datagram
1289 	 * on this send buffer, we need to add the send buffer on to the
1290 	 * receiving socket's buffer list.  We put ourselves on top of the
1291 	 * list.  Such logic gives infrequent senders priority over frequent
1292 	 * senders.
1293 	 *
1294 	 * Note on byte count management. As long as event methods kevent(2),
1295 	 * select(2) are not protocol specific (yet), we need to maintain
1296 	 * meaningful values on the receive buffer.  So, the receive buffer
1297 	 * would accumulate counters from all connected buffers potentially
1298 	 * having sb_ccc > sb_hiwat or sb_mbcnt > sb_mbmax.
1299 	 */
1300 	so2 = unp2->unp_socket;
1301 	sb = (addr == NULL) ? &so->so_snd : &so2->so_rcv;
1302 	SOCK_RECVBUF_LOCK(so2);
1303 	if (uipc_dgram_sbspace(sb, cc + ctl, mbcnt)) {
1304 		if (addr == NULL && STAILQ_EMPTY(&sb->uxdg_mb))
1305 			TAILQ_INSERT_HEAD(&so2->so_rcv.uxdg_conns, &so->so_snd,
1306 			    uxdg_clist);
1307 		STAILQ_INSERT_TAIL(&sb->uxdg_mb, f, m_stailqpkt);
1308 		sb->uxdg_cc += cc + ctl;
1309 		sb->uxdg_ctl += ctl;
1310 		sb->uxdg_mbcnt += mbcnt;
1311 		so2->so_rcv.sb_acc += cc + ctl;
1312 		so2->so_rcv.sb_ccc += cc + ctl;
1313 		so2->so_rcv.sb_ctl += ctl;
1314 		so2->so_rcv.sb_mbcnt += mbcnt;
1315 		sorwakeup_locked(so2);
1316 		f = NULL;
1317 	} else {
1318 		soroverflow_locked(so2);
1319 		error = ENOBUFS;
1320 		if (f->m_next->m_type == MT_CONTROL) {
1321 			c = f->m_next;
1322 			f->m_next = NULL;
1323 		}
1324 	}
1325 
1326 	if (addr != NULL)
1327 		unp_disconnect(unp, unp2);
1328 	else
1329 		unp_pcb_unlock_pair(unp, unp2);
1330 
1331 	td->td_ru.ru_msgsnd++;
1332 
1333 out3:
1334 	SOCK_IO_SEND_UNLOCK(so);
1335 out2:
1336 	if (c)
1337 		unp_scan(c, unp_freerights);
1338 out:
1339 	if (f)
1340 		m_freem(f);
1341 	if (c)
1342 		m_freem(c);
1343 	if (m)
1344 		m_freem(m);
1345 
1346 	return (error);
1347 }
1348 
1349 /*
1350  * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK.
1351  * The mbuf has already been unlinked from the uxdg_mb of socket buffer
1352  * and needs to be linked onto uxdg_peeked of receive socket buffer.
1353  */
1354 static int
1355 uipc_peek_dgram(struct socket *so, struct mbuf *m, struct sockaddr **psa,
1356     struct uio *uio, struct mbuf **controlp, int *flagsp)
1357 {
1358 	ssize_t len = 0;
1359 	int error;
1360 
1361 	so->so_rcv.uxdg_peeked = m;
1362 	so->so_rcv.uxdg_cc += m->m_pkthdr.len;
1363 	so->so_rcv.uxdg_ctl += m->m_pkthdr.ctllen;
1364 	so->so_rcv.uxdg_mbcnt += m->m_pkthdr.memlen;
1365 	SOCK_RECVBUF_UNLOCK(so);
1366 
1367 	KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));
1368 	if (psa != NULL)
1369 		*psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);
1370 
1371 	m = m->m_next;
1372 	KASSERT(m, ("%s: no data or control after soname", __func__));
1373 
1374 	/*
1375 	 * With MSG_PEEK the control isn't executed, just copied.
1376 	 */
1377 	while (m != NULL && m->m_type == MT_CONTROL) {
1378 		if (controlp != NULL) {
1379 			*controlp = m_copym(m, 0, m->m_len, M_WAITOK);
1380 			controlp = &(*controlp)->m_next;
1381 		}
1382 		m = m->m_next;
1383 	}
1384 	KASSERT(m == NULL || m->m_type == MT_DATA,
1385 	    ("%s: not MT_DATA mbuf %p", __func__, m));
1386 	while (m != NULL && uio->uio_resid > 0) {
1387 		len = uio->uio_resid;
1388 		if (len > m->m_len)
1389 			len = m->m_len;
1390 		error = uiomove(mtod(m, char *), (int)len, uio);
1391 		if (error) {
1392 			SOCK_IO_RECV_UNLOCK(so);
1393 			return (error);
1394 		}
1395 		if (len == m->m_len)
1396 			m = m->m_next;
1397 	}
1398 	SOCK_IO_RECV_UNLOCK(so);
1399 
1400 	if (flagsp != NULL) {
1401 		if (m != NULL) {
1402 			if (*flagsp & MSG_TRUNC) {
1403 				/* Report real length of the packet */
1404 				uio->uio_resid -= m_length(m, NULL) - len;
1405 			}
1406 			*flagsp |= MSG_TRUNC;
1407 		} else
1408 			*flagsp &= ~MSG_TRUNC;
1409 	}
1410 
1411 	return (0);
1412 }
1413 
1414 /*
1415  * PF_UNIX/SOCK_DGRAM receive
1416  */
1417 static int
1418 uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
1419     struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1420 {
1421 	struct sockbuf *sb = NULL;
1422 	struct mbuf *m;
1423 	int flags, error;
1424 	ssize_t len = 0;
1425 	bool nonblock;
1426 
1427 	MPASS(mp0 == NULL);
1428 
1429 	if (psa != NULL)
1430 		*psa = NULL;
1431 	if (controlp != NULL)
1432 		*controlp = NULL;
1433 
1434 	flags = flagsp != NULL ? *flagsp : 0;
1435 	nonblock = (so->so_state & SS_NBIO) ||
1436 	    (flags & (MSG_DONTWAIT | MSG_NBIO));
1437 
1438 	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
1439 	if (__predict_false(error))
1440 		return (error);
1441 
1442 	/*
1443 	 * Loop blocking while waiting for a datagram.  Prioritize connected
1444 	 * peers over unconnected sends.  Set sb to selected socket buffer
1445 	 * containing an mbuf on exit from the wait loop.  A datagram that
1446 	 * had already been peeked at has top priority.
1447 	 */
1448 	SOCK_RECVBUF_LOCK(so);
1449 	while ((m = so->so_rcv.uxdg_peeked) == NULL &&
1450 	    (sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) == NULL &&
1451 	    (m = STAILQ_FIRST(&so->so_rcv.uxdg_mb)) == NULL) {
1452 		if (so->so_error) {
1453 			error = so->so_error;
1454 			if (!(flags & MSG_PEEK))
1455 				so->so_error = 0;
1456 			SOCK_RECVBUF_UNLOCK(so);
1457 			SOCK_IO_RECV_UNLOCK(so);
1458 			return (error);
1459 		}
1460 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
1461 		    uio->uio_resid == 0) {
1462 			SOCK_RECVBUF_UNLOCK(so);
1463 			SOCK_IO_RECV_UNLOCK(so);
1464 			return (0);
1465 		}
1466 		if (nonblock) {
1467 			SOCK_RECVBUF_UNLOCK(so);
1468 			SOCK_IO_RECV_UNLOCK(so);
1469 			return (EWOULDBLOCK);
1470 		}
1471 		error = sbwait(so, SO_RCV);
1472 		if (error) {
1473 			SOCK_RECVBUF_UNLOCK(so);
1474 			SOCK_IO_RECV_UNLOCK(so);
1475 			return (error);
1476 		}
1477 	}
1478 
1479 	if (sb == NULL)
1480 		sb = &so->so_rcv;
1481 	else if (m == NULL)
1482 		m = STAILQ_FIRST(&sb->uxdg_mb);
1483 	else
1484 		MPASS(m == so->so_rcv.uxdg_peeked);
1485 
1486 	MPASS(sb->uxdg_cc > 0);
1487 	M_ASSERTPKTHDR(m);
1488 	KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));
1489 
1490 	if (uio->uio_td)
1491 		uio->uio_td->td_ru.ru_msgrcv++;
1492 
1493 	if (__predict_true(m != so->so_rcv.uxdg_peeked)) {
1494 		STAILQ_REMOVE_HEAD(&sb->uxdg_mb, m_stailqpkt);
1495 		if (STAILQ_EMPTY(&sb->uxdg_mb) && sb != &so->so_rcv)
1496 			TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist);
1497 	} else
1498 		so->so_rcv.uxdg_peeked = NULL;
1499 
1500 	sb->uxdg_cc -= m->m_pkthdr.len;
1501 	sb->uxdg_ctl -= m->m_pkthdr.ctllen;
1502 	sb->uxdg_mbcnt -= m->m_pkthdr.memlen;
1503 
1504 	if (__predict_false(flags & MSG_PEEK))
1505 		return (uipc_peek_dgram(so, m, psa, uio, controlp, flagsp));
1506 
1507 	so->so_rcv.sb_acc -= m->m_pkthdr.len;
1508 	so->so_rcv.sb_ccc -= m->m_pkthdr.len;
1509 	so->so_rcv.sb_ctl -= m->m_pkthdr.ctllen;
1510 	so->so_rcv.sb_mbcnt -= m->m_pkthdr.memlen;
1511 	SOCK_RECVBUF_UNLOCK(so);
1512 
1513 	if (psa != NULL)
1514 		*psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);
1515 	m = m_free(m);
1516 	KASSERT(m, ("%s: no data or control after soname", __func__));
1517 
1518 	/*
1519 	 * Packet to copyout() is now in 'm' and it is disconnected from the
1520 	 * queue.
1521 	 *
1522 	 * Process one or more MT_CONTROL mbufs present before any data mbufs
1523 	 * in the first mbuf chain on the socket buffer.  We call into the
1524 	 * unp_externalize() to perform externalization (or freeing if
1525 	 * controlp == NULL). In some cases there can be only MT_CONTROL mbufs
1526 	 * without MT_DATA mbufs.
1527 	 */
1528 	while (m != NULL && m->m_type == MT_CONTROL) {
1529 		struct mbuf *cm;
1530 
1531 		/* XXXGL: unp_externalize() is also dom_externalize() KBI and
1532 		 * it frees whole chain, so we must disconnect the mbuf.
1533 		 */
1534 		cm = m; m = m->m_next; cm->m_next = NULL;
1535 		error = unp_externalize(cm, controlp, flags);
1536 		if (error != 0) {
1537 			SOCK_IO_RECV_UNLOCK(so);
1538 			unp_scan(m, unp_freerights);
1539 			m_freem(m);
1540 			return (error);
1541 		}
1542 		if (controlp != NULL) {
1543 			while (*controlp != NULL)
1544 				controlp = &(*controlp)->m_next;
1545 		}
1546 	}
1547 	KASSERT(m == NULL || m->m_type == MT_DATA,
1548 	    ("%s: not MT_DATA mbuf %p", __func__, m));
1549 	while (m != NULL && uio->uio_resid > 0) {
1550 		len = uio->uio_resid;
1551 		if (len > m->m_len)
1552 			len = m->m_len;
1553 		error = uiomove(mtod(m, char *), (int)len, uio);
1554 		if (error) {
1555 			SOCK_IO_RECV_UNLOCK(so);
1556 			m_freem(m);
1557 			return (error);
1558 		}
1559 		if (len == m->m_len)
1560 			m = m_free(m);
1561 		else {
1562 			m->m_data += len;
1563 			m->m_len -= len;
1564 		}
1565 	}
1566 	SOCK_IO_RECV_UNLOCK(so);
1567 
1568 	if (m != NULL) {
1569 		if (flagsp != NULL) {
1570 			if (flags & MSG_TRUNC) {
1571 				/* Report real length of the packet */
1572 				uio->uio_resid -= m_length(m, NULL);
1573 			}
1574 			*flagsp |= MSG_TRUNC;
1575 		}
1576 		m_freem(m);
1577 	} else if (flagsp != NULL)
1578 		*flagsp &= ~MSG_TRUNC;
1579 
1580 	return (0);
1581 }
1582 
1583 static bool
1584 uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp)
1585 {
1586 	struct mbuf *mb, *n;
1587 	struct sockbuf *sb;
1588 
1589 	SOCK_LOCK(so);
1590 	if (SOLISTENING(so)) {
1591 		SOCK_UNLOCK(so);
1592 		return (false);
1593 	}
1594 	mb = NULL;
1595 	sb = &so->so_rcv;
1596 	SOCKBUF_LOCK(sb);
1597 	if (sb->sb_fnrdy != NULL) {
1598 		for (mb = sb->sb_mb, n = mb->m_nextpkt; mb != NULL;) {
1599 			if (mb == m) {
1600 				*errorp = sbready(sb, m, count);
1601 				break;
1602 			}
1603 			mb = mb->m_next;
1604 			if (mb == NULL) {
1605 				mb = n;
1606 				if (mb != NULL)
1607 					n = mb->m_nextpkt;
1608 			}
1609 		}
1610 	}
1611 	SOCKBUF_UNLOCK(sb);
1612 	SOCK_UNLOCK(so);
1613 	return (mb != NULL);
1614 }
1615 
1616 static int
1617 uipc_ready(struct socket *so, struct mbuf *m, int count)
1618 {
1619 	struct unpcb *unp, *unp2;
1620 	struct socket *so2;
1621 	int error, i;
1622 
1623 	unp = sotounpcb(so);
1624 
1625 	KASSERT(so->so_type == SOCK_STREAM,
1626 	    ("%s: unexpected socket type for %p", __func__, so));
1627 
1628 	UNP_PCB_LOCK(unp);
1629 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
1630 		UNP_PCB_UNLOCK(unp);
1631 		so2 = unp2->unp_socket;
1632 		SOCKBUF_LOCK(&so2->so_rcv);
1633 		if ((error = sbready(&so2->so_rcv, m, count)) == 0)
1634 			sorwakeup_locked(so2);
1635 		else
1636 			SOCKBUF_UNLOCK(&so2->so_rcv);
1637 		UNP_PCB_UNLOCK(unp2);
1638 		return (error);
1639 	}
1640 	UNP_PCB_UNLOCK(unp);
1641 
1642 	/*
1643 	 * The receiving socket has been disconnected, but may still be valid.
1644 	 * In this case, the now-ready mbufs are still present in its socket
1645 	 * buffer, so perform an exhaustive search before giving up and freeing
1646 	 * the mbufs.
1647 	 */
1648 	UNP_LINK_RLOCK();
1649 	LIST_FOREACH(unp, &unp_shead, unp_link) {
1650 		if (uipc_ready_scan(unp->unp_socket, m, count, &error))
1651 			break;
1652 	}
1653 	UNP_LINK_RUNLOCK();
1654 
1655 	if (unp == NULL) {
1656 		for (i = 0; i < count; i++)
1657 			m = m_free(m);
1658 		error = ECONNRESET;
1659 	}
1660 	return (error);
1661 }
1662 
1663 static int
1664 uipc_sense(struct socket *so, struct stat *sb)
1665 {
1666 	struct unpcb *unp;
1667 
1668 	unp = sotounpcb(so);
1669 	KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));
1670 
1671 	sb->st_blksize = so->so_snd.sb_hiwat;
1672 	sb->st_dev = NODEV;
1673 	sb->st_ino = unp->unp_ino;
1674 	return (0);
1675 }
1676 
1677 static int
1678 uipc_shutdown(struct socket *so, enum shutdown_how how)
1679 {
1680 	struct unpcb *unp = sotounpcb(so);
1681 	int error;
1682 
1683 	SOCK_LOCK(so);
1684 	if (SOLISTENING(so)) {
1685 		if (how != SHUT_WR) {
1686 			so->so_error = ECONNABORTED;
1687 			solisten_wakeup(so);    /* unlocks so */
1688 		} else
1689 			SOCK_UNLOCK(so);
1690 		return (ENOTCONN);
1691 	} else if ((so->so_state &
1692 	    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
1693 		/*
1694 		 * POSIX mandates us to just return ENOTCONN when shutdown(2) is
1695 		 * invoked on a datagram sockets, however historically we would
1696 		 * actually tear socket down.  This is known to be leveraged by
1697 		 * some applications to unblock process waiting in recv(2) by
1698 		 * other process that it shares that socket with.  Try to meet
1699 		 * both backward-compatibility and POSIX requirements by forcing
1700 		 * ENOTCONN but still flushing buffers and performing wakeup(9).
1701 		 *
1702 		 * XXXGL: it remains unknown what applications expect this
1703 		 * behavior and is this isolated to unix/dgram or inet/dgram or
1704 		 * both.  See: D10351, D3039.
1705 		 */
1706 		error = ENOTCONN;
1707 		if (so->so_type != SOCK_DGRAM) {
1708 			SOCK_UNLOCK(so);
1709 			return (error);
1710 		}
1711 	} else
1712 		error = 0;
1713 	SOCK_UNLOCK(so);
1714 
1715 	switch (how) {
1716 	case SHUT_RD:
1717 		socantrcvmore(so);
1718 		unp_dispose(so);
1719 		break;
1720 	case SHUT_RDWR:
1721 		socantrcvmore(so);
1722 		unp_dispose(so);
1723 		/* FALLTHROUGH */
1724 	case SHUT_WR:
1725 		UNP_PCB_LOCK(unp);
1726 		socantsendmore(so);
1727 		unp_shutdown(unp);
1728 		UNP_PCB_UNLOCK(unp);
1729 	}
1730 	wakeup(&so->so_timeo);
1731 
1732 	return (error);
1733 }
1734 
1735 static int
1736 uipc_sockaddr(struct socket *so, struct sockaddr *ret)
1737 {
1738 	struct unpcb *unp;
1739 	const struct sockaddr *sa;
1740 
1741 	unp = sotounpcb(so);
1742 	KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL"));
1743 
1744 	UNP_PCB_LOCK(unp);
1745 	if (unp->unp_addr != NULL)
1746 		sa = (struct sockaddr *) unp->unp_addr;
1747 	else
1748 		sa = &sun_noname;
1749 	bcopy(sa, ret, sa->sa_len);
1750 	UNP_PCB_UNLOCK(unp);
1751 	return (0);
1752 }
1753 
1754 static int
1755 uipc_ctloutput(struct socket *so, struct sockopt *sopt)
1756 {
1757 	struct unpcb *unp;
1758 	struct xucred xu;
1759 	int error, optval;
1760 
1761 	if (sopt->sopt_level != SOL_LOCAL)
1762 		return (EINVAL);
1763 
1764 	unp = sotounpcb(so);
1765 	KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL"));
1766 	error = 0;
1767 	switch (sopt->sopt_dir) {
1768 	case SOPT_GET:
1769 		switch (sopt->sopt_name) {
1770 		case LOCAL_PEERCRED:
1771 			UNP_PCB_LOCK(unp);
1772 			if (unp->unp_flags & UNP_HAVEPC)
1773 				xu = unp->unp_peercred;
1774 			else {
1775 				if (so->so_type == SOCK_STREAM)
1776 					error = ENOTCONN;
1777 				else
1778 					error = EINVAL;
1779 			}
1780 			UNP_PCB_UNLOCK(unp);
1781 			if (error == 0)
1782 				error = sooptcopyout(sopt, &xu, sizeof(xu));
1783 			break;
1784 
1785 		case LOCAL_CREDS:
1786 			/* Unlocked read. */
1787 			optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0;
1788 			error = sooptcopyout(sopt, &optval, sizeof(optval));
1789 			break;
1790 
1791 		case LOCAL_CREDS_PERSISTENT:
1792 			/* Unlocked read. */
1793 			optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 1 : 0;
1794 			error = sooptcopyout(sopt, &optval, sizeof(optval));
1795 			break;
1796 
1797 		default:
1798 			error = EOPNOTSUPP;
1799 			break;
1800 		}
1801 		break;
1802 
1803 	case SOPT_SET:
1804 		switch (sopt->sopt_name) {
1805 		case LOCAL_CREDS:
1806 		case LOCAL_CREDS_PERSISTENT:
1807 			error = sooptcopyin(sopt, &optval, sizeof(optval),
1808 					    sizeof(optval));
1809 			if (error)
1810 				break;
1811 
1812 #define	OPTSET(bit, exclusive) do {					\
1813 	UNP_PCB_LOCK(unp);						\
1814 	if (optval) {							\
1815 		if ((unp->unp_flags & (exclusive)) != 0) {		\
1816 			UNP_PCB_UNLOCK(unp);				\
1817 			error = EINVAL;					\
1818 			break;						\
1819 		}							\
1820 		unp->unp_flags |= (bit);				\
1821 	} else								\
1822 		unp->unp_flags &= ~(bit);				\
1823 	UNP_PCB_UNLOCK(unp);						\
1824 } while (0)
1825 
1826 			switch (sopt->sopt_name) {
1827 			case LOCAL_CREDS:
1828 				OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS);
1829 				break;
1830 
1831 			case LOCAL_CREDS_PERSISTENT:
1832 				OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT);
1833 				break;
1834 
1835 			default:
1836 				break;
1837 			}
1838 			break;
1839 #undef	OPTSET
1840 		default:
1841 			error = ENOPROTOOPT;
1842 			break;
1843 		}
1844 		break;
1845 
1846 	default:
1847 		error = EOPNOTSUPP;
1848 		break;
1849 	}
1850 	return (error);
1851 }
1852 
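/*
 * Example (userland sketch; "s" is an illustrative connected stream
 * descriptor): querying the peer credentials that uipc_ctloutput()
 * returns for LOCAL_PEERCRED:
 *
 *	struct xucred xu;
 *	socklen_t len = sizeof(xu);
 *	if (getsockopt(s, SOL_LOCAL, LOCAL_PEERCRED, &xu, &len) == 0)
 *		printf("peer euid %u pid %d\n", (unsigned)xu.cr_uid,
 *		    (int)xu.cr_pid);
 */
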
1853 static int
1854 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1855 {
1856 
1857 	return (unp_connectat(AT_FDCWD, so, nam, td, false));
1858 }
1859 
1860 static int
1861 unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
1862     struct thread *td, bool return_locked)
1863 {
1864 	struct mtx *vplock;
1865 	struct sockaddr_un *soun;
1866 	struct vnode *vp;
1867 	struct socket *so2;
1868 	struct unpcb *unp, *unp2, *unp3;
1869 	struct nameidata nd;
1870 	char buf[SOCK_MAXADDRLEN];
1871 	struct sockaddr *sa;
1872 	cap_rights_t rights;
1873 	int error, len;
1874 	bool connreq;
1875 
1876 	if (nam->sa_family != AF_UNIX)
1877 		return (EAFNOSUPPORT);
1878 	if (nam->sa_len > sizeof(struct sockaddr_un))
1879 		return (EINVAL);
1880 	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
1881 	if (len <= 0)
1882 		return (EINVAL);
1883 	soun = (struct sockaddr_un *)nam;
1884 	bcopy(soun->sun_path, buf, len);
1885 	buf[len] = 0;
1886 
1887 	error = 0;
1888 	unp = sotounpcb(so);
1889 	UNP_PCB_LOCK(unp);
1890 	for (;;) {
1891 		/*
1892 		 * Wait for connection state to stabilize.  If a connection
1893 		 * already exists, give up.  For datagram sockets, which permit
1894 		 * multiple consecutive connect(2) calls, upper layers are
1895 		 * responsible for disconnecting in advance of a subsequent
1896 		 * connect(2), but this is not synchronized with PCB connection
1897 		 * state.
1898 		 *
1899 		 * Also make sure that no threads are currently attempting to
1900 		 * lock the peer socket, to ensure that unp_conn cannot
1901 		 * transition between two valid sockets while locks are dropped.
1902 		 */
1903 		if (SOLISTENING(so))
1904 			error = EOPNOTSUPP;
1905 		else if (unp->unp_conn != NULL)
1906 			error = EISCONN;
1907 		else if ((unp->unp_flags & UNP_CONNECTING) != 0) {
1908 			error = EALREADY;
1909 		}
1910 		if (error != 0) {
1911 			UNP_PCB_UNLOCK(unp);
1912 			return (error);
1913 		}
1914 		if (unp->unp_pairbusy > 0) {
1915 			unp->unp_flags |= UNP_WAITING;
1916 			mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0);
1917 			continue;
1918 		}
1919 		break;
1920 	}
1921 	unp->unp_flags |= UNP_CONNECTING;
1922 	UNP_PCB_UNLOCK(unp);
1923 
1924 	connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0;
1925 	if (connreq)
1926 		sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1927 	else
1928 		sa = NULL;
1929 	NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF,
1930 	    UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT));
1931 	error = namei(&nd);
1932 	if (error)
1933 		vp = NULL;
1934 	else
1935 		vp = nd.ni_vp;
1936 	ASSERT_VOP_LOCKED(vp, "unp_connect");
1937 	if (error)
1938 		goto bad;
1939 	NDFREE_PNBUF(&nd);
1940 
1941 	if (vp->v_type != VSOCK) {
1942 		error = ENOTSOCK;
1943 		goto bad;
1944 	}
1945 #ifdef MAC
1946 	error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD);
1947 	if (error)
1948 		goto bad;
1949 #endif
1950 	error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td);
1951 	if (error)
1952 		goto bad;
1953 
1954 	unp = sotounpcb(so);
1955 	KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
1956 
1957 	vplock = mtx_pool_find(unp_vp_mtxpool, vp);
1958 	mtx_lock(vplock);
1959 	VOP_UNP_CONNECT(vp, &unp2);
1960 	if (unp2 == NULL) {
1961 		error = ECONNREFUSED;
1962 		goto bad2;
1963 	}
1964 	so2 = unp2->unp_socket;
1965 	if (so->so_type != so2->so_type) {
1966 		error = EPROTOTYPE;
1967 		goto bad2;
1968 	}
1969 	if (connreq) {
1970 		if (SOLISTENING(so2)) {
1971 			CURVNET_SET(so2->so_vnet);
1972 			so2 = sonewconn(so2, 0);
1973 			CURVNET_RESTORE();
1974 		} else
1975 			so2 = NULL;
1976 		if (so2 == NULL) {
1977 			error = ECONNREFUSED;
1978 			goto bad2;
1979 		}
1980 		unp3 = sotounpcb(so2);
1981 		unp_pcb_lock_pair(unp2, unp3);
1982 		if (unp2->unp_addr != NULL) {
1983 			bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
1984 			unp3->unp_addr = (struct sockaddr_un *) sa;
1985 			sa = NULL;
1986 		}
1987 
1988 		unp_copy_peercred(td, unp3, unp, unp2);
1989 
1990 		UNP_PCB_UNLOCK(unp2);
1991 		unp2 = unp3;
1992 
1993 		/*
1994 		 * It is safe to block on the PCB lock here since unp2 is
1995 		 * nascent and cannot be connected to any other sockets.
1996 		 */
1997 		UNP_PCB_LOCK(unp);
1998 #ifdef MAC
1999 		mac_socketpeer_set_from_socket(so, so2);
2000 		mac_socketpeer_set_from_socket(so2, so);
2001 #endif
2002 	} else {
2003 		unp_pcb_lock_pair(unp, unp2);
2004 	}
2005 	KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 &&
2006 	    sotounpcb(so2) == unp2,
2007 	    ("%s: unp2 %p so2 %p", __func__, unp2, so2));
2008 	unp_connect2(so, so2);
2009 	KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
2010 	    ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
2011 	unp->unp_flags &= ~UNP_CONNECTING;
2012 	if (!return_locked)
2013 		unp_pcb_unlock_pair(unp, unp2);
2014 bad2:
2015 	mtx_unlock(vplock);
2016 bad:
2017 	if (vp != NULL) {
2018 		/*
2019 		 * If we are returning locked (called via uipc_sosend_dgram()),
2020 		 * we need to be sure that vput() won't sleep.  This is
2021 		 * guaranteed by the VOP_UNP_CONNECT() call above and the unp2
2022 		 * lock.
2022 		 * SOCK_STREAM/SEQPACKET can't request return_locked (yet).
2023 		 */
2024 		MPASS(!(return_locked && connreq));
2025 		vput(vp);
2026 	}
2027 	free(sa, M_SONAME);
2028 	if (__predict_false(error)) {
2029 		UNP_PCB_LOCK(unp);
2030 		KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
2031 		    ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
2032 		unp->unp_flags &= ~UNP_CONNECTING;
2033 		UNP_PCB_UNLOCK(unp);
2034 	}
2035 	return (error);
2036 }
2037 
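/*
 * Example (userland sketch; the path is illustrative): a plain connect(2)
 * through the file system name space ends up in unp_connectat() above
 * with fd == AT_FDCWD:
 *
 *	struct sockaddr_un sun = { .sun_len = sizeof(sun),
 *	    .sun_family = AF_UNIX };
 *	int s = socket(PF_LOCAL, SOCK_STREAM, 0);
 *	strlcpy(sun.sun_path, "/var/run/example.sock", sizeof(sun.sun_path));
 *	if (connect(s, (struct sockaddr *)&sun, sizeof(sun)) == -1)
 *		err(1, "connect");
 */
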
2038 /*
2039  * Set socket peer credentials at connection time.
2040  *
2041  * The client's PCB credentials are copied from its process structure.  The
2042  * server's PCB credentials are copied from the socket on which it called
2043  * listen(2).  uipc_listen cached that process's credentials at the time.
2044  */
2045 void
2046 unp_copy_peercred(struct thread *td, struct unpcb *client_unp,
2047     struct unpcb *server_unp, struct unpcb *listen_unp)
2048 {
2049 	cru2xt(td, &client_unp->unp_peercred);
2050 	client_unp->unp_flags |= UNP_HAVEPC;
2051 
2052 	memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred,
2053 	    sizeof(server_unp->unp_peercred));
2054 	server_unp->unp_flags |= UNP_HAVEPC;
2055 	client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK);
2056 }
2057 
2058 static void
2059 unp_connect2(struct socket *so, struct socket *so2)
2060 {
2061 	struct unpcb *unp;
2062 	struct unpcb *unp2;
2063 
2064 	MPASS(so2->so_type == so->so_type);
2065 	unp = sotounpcb(so);
2066 	KASSERT(unp != NULL, ("unp_connect2: unp == NULL"));
2067 	unp2 = sotounpcb(so2);
2068 	KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
2069 
2070 	UNP_PCB_LOCK_ASSERT(unp);
2071 	UNP_PCB_LOCK_ASSERT(unp2);
2072 	KASSERT(unp->unp_conn == NULL,
2073 	    ("%s: socket %p is already connected", __func__, unp));
2074 
2075 	unp->unp_conn = unp2;
2076 	unp_pcb_hold(unp2);
2077 	unp_pcb_hold(unp);
2078 	switch (so->so_type) {
2079 	case SOCK_DGRAM:
2080 		UNP_REF_LIST_LOCK();
2081 		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
2082 		UNP_REF_LIST_UNLOCK();
2083 		soisconnected(so);
2084 		break;
2085 
2086 	case SOCK_STREAM:
2087 	case SOCK_SEQPACKET:
2088 		KASSERT(unp2->unp_conn == NULL,
2089 		    ("%s: socket %p is already connected", __func__, unp2));
2090 		unp2->unp_conn = unp;
2091 		soisconnected(so);
2092 		soisconnected(so2);
2093 		break;
2094 
2095 	default:
2096 		panic("unp_connect2");
2097 	}
2098 }
2099 
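/*
 * Example (userland sketch): socketpair(2) is the other route into
 * unp_connect2() above, cross-linking two freshly attached PCBs without
 * touching the file system name space:
 *
 *	int sv[2];
 *	if (socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sv) == 0)
 *		... sv[0] and sv[1] are now mutually connected ...
 */
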
2100 static void
2101 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
2102 {
2103 	struct socket *so, *so2;
2104 	struct mbuf *m = NULL;
2105 #ifdef INVARIANTS
2106 	struct unpcb *unptmp;
2107 #endif
2108 
2109 	UNP_PCB_LOCK_ASSERT(unp);
2110 	UNP_PCB_LOCK_ASSERT(unp2);
2111 	KASSERT(unp->unp_conn == unp2,
2112 	    ("%s: unpcb %p is not connected to %p", __func__, unp, unp2));
2113 
2114 	unp->unp_conn = NULL;
2115 	so = unp->unp_socket;
2116 	so2 = unp2->unp_socket;
2117 	switch (unp->unp_socket->so_type) {
2118 	case SOCK_DGRAM:
2119 		/*
2120 		 * Remove our send socket buffer from the peer's receive buffer.
2121 		 * Move the data to the receive buffer only if it is empty.
2122 		 * This is a protection against a scenario where a peer
2123 		 * connects, floods and disconnects, effectively blocking
2124 		 * sendto() from unconnected sockets.
2125 		 */
2126 		SOCK_RECVBUF_LOCK(so2);
2127 		if (!STAILQ_EMPTY(&so->so_snd.uxdg_mb)) {
2128 			TAILQ_REMOVE(&so2->so_rcv.uxdg_conns, &so->so_snd,
2129 			    uxdg_clist);
2130 			if (__predict_true((so2->so_rcv.sb_state &
2131 			    SBS_CANTRCVMORE) == 0) &&
2132 			    STAILQ_EMPTY(&so2->so_rcv.uxdg_mb)) {
2133 				STAILQ_CONCAT(&so2->so_rcv.uxdg_mb,
2134 				    &so->so_snd.uxdg_mb);
2135 				so2->so_rcv.uxdg_cc += so->so_snd.uxdg_cc;
2136 				so2->so_rcv.uxdg_ctl += so->so_snd.uxdg_ctl;
2137 				so2->so_rcv.uxdg_mbcnt += so->so_snd.uxdg_mbcnt;
2138 			} else {
2139 				m = STAILQ_FIRST(&so->so_snd.uxdg_mb);
2140 				STAILQ_INIT(&so->so_snd.uxdg_mb);
2141 				so2->so_rcv.sb_acc -= so->so_snd.uxdg_cc;
2142 				so2->so_rcv.sb_ccc -= so->so_snd.uxdg_cc;
2143 				so2->so_rcv.sb_ctl -= so->so_snd.uxdg_ctl;
2144 				so2->so_rcv.sb_mbcnt -= so->so_snd.uxdg_mbcnt;
2145 			}
2146 			/* Note: so may reconnect. */
2147 			so->so_snd.uxdg_cc = 0;
2148 			so->so_snd.uxdg_ctl = 0;
2149 			so->so_snd.uxdg_mbcnt = 0;
2150 		}
2151 		SOCK_RECVBUF_UNLOCK(so2);
2152 		UNP_REF_LIST_LOCK();
2153 #ifdef INVARIANTS
2154 		LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) {
2155 			if (unptmp == unp)
2156 				break;
2157 		}
2158 		KASSERT(unptmp != NULL,
2159 		    ("%s: %p not found in reflist of %p", __func__, unp, unp2));
2160 #endif
2161 		LIST_REMOVE(unp, unp_reflink);
2162 		UNP_REF_LIST_UNLOCK();
2163 		if (so) {
2164 			SOCK_LOCK(so);
2165 			so->so_state &= ~SS_ISCONNECTED;
2166 			SOCK_UNLOCK(so);
2167 		}
2168 		break;
2169 
2170 	case SOCK_STREAM:
2171 	case SOCK_SEQPACKET:
2172 		if (so)
2173 			soisdisconnected(so);
2174 		MPASS(unp2->unp_conn == unp);
2175 		unp2->unp_conn = NULL;
2176 		if (so2)
2177 			soisdisconnected(so2);
2178 		break;
2179 	}
2180 
2181 	if (unp == unp2) {
2182 		unp_pcb_rele_notlast(unp);
2183 		if (!unp_pcb_rele(unp))
2184 			UNP_PCB_UNLOCK(unp);
2185 	} else {
2186 		if (!unp_pcb_rele(unp))
2187 			UNP_PCB_UNLOCK(unp);
2188 		if (!unp_pcb_rele(unp2))
2189 			UNP_PCB_UNLOCK(unp2);
2190 	}
2191 
2192 	if (m != NULL) {
2193 		unp_scan(m, unp_freerights);
2194 		m_freemp(m);
2195 	}
2196 }
2197 
2198 /*
2199  * unp_pcblist() walks the global list of struct unpcb's to generate a
2200  * pointer list, bumping the refcount on each unpcb.  It then copies them out
2201  * sequentially, validating the generation number on each to see if it has
2202  * been detached.  All of this is necessary because copyout() may sleep on
2203  * disk I/O.
2204  */
2205 static int
2206 unp_pcblist(SYSCTL_HANDLER_ARGS)
2207 {
2208 	struct unpcb *unp, **unp_list;
2209 	unp_gen_t gencnt;
2210 	struct xunpgen *xug;
2211 	struct unp_head *head;
2212 	struct xunpcb *xu;
2213 	u_int i;
2214 	int error, n;
2215 
2216 	switch ((intptr_t)arg1) {
2217 	case SOCK_STREAM:
2218 		head = &unp_shead;
2219 		break;
2220 
2221 	case SOCK_DGRAM:
2222 		head = &unp_dhead;
2223 		break;
2224 
2225 	case SOCK_SEQPACKET:
2226 		head = &unp_sphead;
2227 		break;
2228 
2229 	default:
2230 		panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1);
2231 	}
2232 
2233 	/*
2234 	 * The process of preparing the PCB list is too time-consuming and
2235 	 * resource-intensive to run twice on every request.
2236 	 */
2237 	if (req->oldptr == NULL) {
2238 		n = unp_count;
2239 		req->oldidx = 2 * (sizeof *xug)
2240 			+ (n + n/8) * sizeof(struct xunpcb);
2241 		return (0);
2242 	}
2243 
2244 	if (req->newptr != NULL)
2245 		return (EPERM);
2246 
2247 	/*
2248 	 * OK, now we're committed to doing something.
2249 	 */
2250 	xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO);
2251 	UNP_LINK_RLOCK();
2252 	gencnt = unp_gencnt;
2253 	n = unp_count;
2254 	UNP_LINK_RUNLOCK();
2255 
2256 	xug->xug_len = sizeof *xug;
2257 	xug->xug_count = n;
2258 	xug->xug_gen = gencnt;
2259 	xug->xug_sogen = so_gencnt;
2260 	error = SYSCTL_OUT(req, xug, sizeof *xug);
2261 	if (error) {
2262 		free(xug, M_TEMP);
2263 		return (error);
2264 	}
2265 
2266 	unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
2267 
2268 	UNP_LINK_RLOCK();
2269 	for (unp = LIST_FIRST(head), i = 0; unp && i < n;
2270 	     unp = LIST_NEXT(unp, unp_link)) {
2271 		UNP_PCB_LOCK(unp);
2272 		if (unp->unp_gencnt <= gencnt) {
2273 			if (cr_cansee(req->td->td_ucred,
2274 			    unp->unp_socket->so_cred)) {
2275 				UNP_PCB_UNLOCK(unp);
2276 				continue;
2277 			}
2278 			unp_list[i++] = unp;
2279 			unp_pcb_hold(unp);
2280 		}
2281 		UNP_PCB_UNLOCK(unp);
2282 	}
2283 	UNP_LINK_RUNLOCK();
2284 	n = i;			/* In case we lost some during malloc. */
2285 
2286 	error = 0;
2287 	xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO);
2288 	for (i = 0; i < n; i++) {
2289 		unp = unp_list[i];
2290 		UNP_PCB_LOCK(unp);
2291 		if (unp_pcb_rele(unp))
2292 			continue;
2293 
2294 		if (unp->unp_gencnt <= gencnt) {
2295 			xu->xu_len = sizeof *xu;
2296 			xu->xu_unpp = (uintptr_t)unp;
2297 			/*
2298 			 * XXX - need more locking here to protect against
2299 			 * connect/disconnect races for SMP.
2300 			 */
2301 			if (unp->unp_addr != NULL)
2302 				bcopy(unp->unp_addr, &xu->xu_addr,
2303 				      unp->unp_addr->sun_len);
2304 			else
2305 				bzero(&xu->xu_addr, sizeof(xu->xu_addr));
2306 			if (unp->unp_conn != NULL &&
2307 			    unp->unp_conn->unp_addr != NULL)
2308 				bcopy(unp->unp_conn->unp_addr,
2309 				      &xu->xu_caddr,
2310 				      unp->unp_conn->unp_addr->sun_len);
2311 			else
2312 				bzero(&xu->xu_caddr, sizeof(xu->xu_caddr));
2313 			xu->unp_vnode = (uintptr_t)unp->unp_vnode;
2314 			xu->unp_conn = (uintptr_t)unp->unp_conn;
2315 			xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs);
2316 			xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink);
2317 			xu->unp_gencnt = unp->unp_gencnt;
2318 			sotoxsocket(unp->unp_socket, &xu->xu_socket);
2319 			UNP_PCB_UNLOCK(unp);
2320 			error = SYSCTL_OUT(req, xu, sizeof *xu);
2321 		} else {
2322 			UNP_PCB_UNLOCK(unp);
2323 		}
2324 	}
2325 	free(xu, M_TEMP);
2326 	if (!error) {
2327 		/*
2328 		 * Give the user an updated idea of our state.  If the
2329 		 * generation differs from what we told her before, she knows
2330 		 * that something happened while we were processing this
2331 		 * request, and it might be necessary to retry.
2332 		 */
2333 		xug->xug_gen = unp_gencnt;
2334 		xug->xug_sogen = so_gencnt;
2335 		xug->xug_count = unp_count;
2336 		error = SYSCTL_OUT(req, xug, sizeof *xug);
2337 	}
2338 	free(unp_list, M_TEMP);
2339 	free(xug, M_TEMP);
2340 	return (error);
2341 }
2342 
2343 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist,
2344     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2345     (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
2346     "List of active local datagram sockets");
2347 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist,
2348     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2349     (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
2350     "List of active local stream sockets");
2351 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist,
2352     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2353     (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
2354     "List of active local seqpacket sockets");
2355 
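/*
 * Example (userland sketch): the sysctls above are consumed by tools such
 * as netstat(1); a minimal reader sizes the buffer first, then fetches
 * the opaque struct xunpgen header followed by struct xunpcb records:
 *
 *	size_t len = 0;
 *	(void)sysctlbyname("net.local.stream.pcblist", NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	if (sysctlbyname("net.local.stream.pcblist", buf, &len, NULL, 0) == 0)
 *		... walk the xunpgen header and xunpcb entries in buf ...
 */
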
2356 static void
2357 unp_shutdown(struct unpcb *unp)
2358 {
2359 	struct unpcb *unp2;
2360 	struct socket *so;
2361 
2362 	UNP_PCB_LOCK_ASSERT(unp);
2363 
2364 	unp2 = unp->unp_conn;
2365 	if ((unp->unp_socket->so_type == SOCK_STREAM ||
2366 	    (unp->unp_socket->so_type == SOCK_SEQPACKET)) && unp2 != NULL) {
2367 		so = unp2->unp_socket;
2368 		if (so != NULL)
2369 			socantrcvmore(so);
2370 	}
2371 }
2372 
2373 static void
2374 unp_drop(struct unpcb *unp)
2375 {
2376 	struct socket *so;
2377 	struct unpcb *unp2;
2378 
2379 	/*
2380 	 * Regardless of whether the socket's peer dropped the connection
2381 	 * with this socket by aborting or disconnecting, POSIX requires
2382 	 * that ECONNRESET is returned.
2383 	 */
2384 
2385 	UNP_PCB_LOCK(unp);
2386 	so = unp->unp_socket;
2387 	if (so)
2388 		so->so_error = ECONNRESET;
2389 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
2390 		/* Last reference dropped in unp_disconnect(). */
2391 		unp_pcb_rele_notlast(unp);
2392 		unp_disconnect(unp, unp2);
2393 	} else if (!unp_pcb_rele(unp)) {
2394 		UNP_PCB_UNLOCK(unp);
2395 	}
2396 }
2397 
2398 static void
2399 unp_freerights(struct filedescent **fdep, int fdcount)
2400 {
2401 	struct file *fp;
2402 	int i;
2403 
2404 	KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount));
2405 
2406 	for (i = 0; i < fdcount; i++) {
2407 		fp = fdep[i]->fde_file;
2408 		filecaps_free(&fdep[i]->fde_caps);
2409 		unp_discard(fp);
2410 	}
2411 	free(fdep[0], M_FILECAPS);
2412 }
2413 
2414 static int
2415 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags)
2416 {
2417 	struct thread *td = curthread;		/* XXX */
2418 	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
2419 	int i;
2420 	int *fdp;
2421 	struct filedesc *fdesc = td->td_proc->p_fd;
2422 	struct filedescent **fdep;
2423 	void *data;
2424 	socklen_t clen = control->m_len, datalen;
2425 	int error, newfds;
2426 	u_int newlen;
2427 
2428 	UNP_LINK_UNLOCK_ASSERT();
2429 
2430 	error = 0;
2431 	if (controlp != NULL) /* controlp == NULL => free control messages */
2432 		*controlp = NULL;
2433 	while (cm != NULL) {
2434 		MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len);
2435 
2436 		data = CMSG_DATA(cm);
2437 		datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
2438 		if (cm->cmsg_level == SOL_SOCKET
2439 		    && cm->cmsg_type == SCM_RIGHTS) {
2440 			newfds = datalen / sizeof(*fdep);
2441 			if (newfds == 0)
2442 				goto next;
2443 			fdep = data;
2444 
2445 			/* If we're not outputting the descriptors, free them. */
2446 			if (error || controlp == NULL) {
2447 				unp_freerights(fdep, newfds);
2448 				goto next;
2449 			}
2450 			FILEDESC_XLOCK(fdesc);
2451 
2452 			/*
2453 			 * Now change each pointer to an fd in the global
2454 			 * table to an integer that is the index of the local
2455 			 * fd table entry that we set up to point to the
2456 			 * global one we are transferring.
2457 			 */
2458 			newlen = newfds * sizeof(int);
2459 			*controlp = sbcreatecontrol(NULL, newlen,
2460 			    SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2461 
2462 			fdp = (int *)
2463 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2464 			if ((error = fdallocn(td, 0, fdp, newfds))) {
2465 				FILEDESC_XUNLOCK(fdesc);
2466 				unp_freerights(fdep, newfds);
2467 				m_freem(*controlp);
2468 				*controlp = NULL;
2469 				goto next;
2470 			}
2471 			for (i = 0; i < newfds; i++, fdp++) {
2472 				_finstall(fdesc, fdep[i]->fde_file, *fdp,
2473 				    (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0,
2474 				    &fdep[i]->fde_caps);
2475 				unp_externalize_fp(fdep[i]->fde_file);
2476 			}
2477 
2478 			/*
2479 			 * The new type indicates that the mbuf data refers to
2480 			 * kernel resources that may need to be released before
2481 			 * the mbuf is freed.
2482 			 */
2483 			m_chtype(*controlp, MT_EXTCONTROL);
2484 			FILEDESC_XUNLOCK(fdesc);
2485 			free(fdep[0], M_FILECAPS);
2486 		} else {
2487 			/* We can just copy anything else across. */
2488 			if (error || controlp == NULL)
2489 				goto next;
2490 			*controlp = sbcreatecontrol(NULL, datalen,
2491 			    cm->cmsg_type, cm->cmsg_level, M_WAITOK);
2492 			bcopy(data,
2493 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
2494 			    datalen);
2495 		}
2496 		controlp = &(*controlp)->m_next;
2497 
2498 next:
2499 		if (CMSG_SPACE(datalen) < clen) {
2500 			clen -= CMSG_SPACE(datalen);
2501 			cm = (struct cmsghdr *)
2502 			    ((caddr_t)cm + CMSG_SPACE(datalen));
2503 		} else {
2504 			clen = 0;
2505 			cm = NULL;
2506 		}
2507 	}
2508 
2509 	m_freem(control);
2510 	return (error);
2511 }
2512 
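/*
 * Example (userland sketch; "s" and "fd" are illustrative): receiving a
 * descriptor that unp_externalize() above installs into the caller's
 * file table:
 *
 *	union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } cm;
 *	char byte;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cm.buf, .msg_controllen = sizeof(cm.buf) };
 *	if (recvmsg(s, &msg, 0) >= 0) {
 *		struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *		if (c != NULL && c->cmsg_level == SOL_SOCKET &&
 *		    c->cmsg_type == SCM_RIGHTS)
 *			memcpy(&fd, CMSG_DATA(c), sizeof(fd));
 *	}
 */
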
2513 static void
2514 unp_zone_change(void *tag)
2515 {
2516 
2517 	uma_zone_set_max(unp_zone, maxsockets);
2518 }
2519 
2520 #ifdef INVARIANTS
2521 static void
2522 unp_zdtor(void *mem, int size __unused, void *arg __unused)
2523 {
2524 	struct unpcb *unp;
2525 
2526 	unp = mem;
2527 
2528 	KASSERT(LIST_EMPTY(&unp->unp_refs),
2529 	    ("%s: unpcb %p has lingering refs", __func__, unp));
2530 	KASSERT(unp->unp_socket == NULL,
2531 	    ("%s: unpcb %p has socket backpointer", __func__, unp));
2532 	KASSERT(unp->unp_vnode == NULL,
2533 	    ("%s: unpcb %p has vnode references", __func__, unp));
2534 	KASSERT(unp->unp_conn == NULL,
2535 	    ("%s: unpcb %p is still connected", __func__, unp));
2536 	KASSERT(unp->unp_addr == NULL,
2537 	    ("%s: unpcb %p has leaked addr", __func__, unp));
2538 }
2539 #endif
2540 
2541 static void
2542 unp_init(void *arg __unused)
2543 {
2544 	uma_dtor dtor;
2545 
2546 #ifdef INVARIANTS
2547 	dtor = unp_zdtor;
2548 #else
2549 	dtor = NULL;
2550 #endif
2551 	unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor,
2552 	    NULL, NULL, UMA_ALIGN_CACHE, 0);
2553 	uma_zone_set_max(unp_zone, maxsockets);
2554 	uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached");
2555 	EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change,
2556 	    NULL, EVENTHANDLER_PRI_ANY);
2557 	LIST_INIT(&unp_dhead);
2558 	LIST_INIT(&unp_shead);
2559 	LIST_INIT(&unp_sphead);
2560 	SLIST_INIT(&unp_defers);
2561 	TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL);
2562 	TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL);
2563 	UNP_LINK_LOCK_INIT();
2564 	UNP_DEFERRED_LOCK_INIT();
2565 	unp_vp_mtxpool = mtx_pool_create("unp vp mtxpool", 32, MTX_DEF);
2566 }
2567 SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL);
2568 
2569 static void
2570 unp_internalize_cleanup_rights(struct mbuf *control)
2571 {
2572 	struct cmsghdr *cp;
2573 	struct mbuf *m;
2574 	void *data;
2575 	socklen_t datalen;
2576 
2577 	for (m = control; m != NULL; m = m->m_next) {
2578 		cp = mtod(m, struct cmsghdr *);
2579 		if (cp->cmsg_level != SOL_SOCKET ||
2580 		    cp->cmsg_type != SCM_RIGHTS)
2581 			continue;
2582 		data = CMSG_DATA(cp);
2583 		datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data;
2584 		unp_freerights(data, datalen / sizeof(struct filedesc *));
2585 	}
2586 }
2587 
2588 static int
2589 unp_internalize(struct mbuf **controlp, struct thread *td,
2590     struct mbuf **clast, u_int *space, u_int *mbcnt)
2591 {
2592 	struct mbuf *control, **initial_controlp;
2593 	struct proc *p;
2594 	struct filedesc *fdesc;
2595 	struct bintime *bt;
2596 	struct cmsghdr *cm;
2597 	struct cmsgcred *cmcred;
2598 	struct filedescent *fde, **fdep, *fdev;
2599 	struct file *fp;
2600 	struct timeval *tv;
2601 	struct timespec *ts;
2602 	void *data;
2603 	socklen_t clen, datalen;
2604 	int i, j, error, *fdp, oldfds;
2605 	u_int newlen;
2606 
2607 	MPASS((*controlp)->m_next == NULL); /* COMPAT_OLDSOCK may violate */
2608 	UNP_LINK_UNLOCK_ASSERT();
2609 
2610 	p = td->td_proc;
2611 	fdesc = p->p_fd;
2612 	error = 0;
2613 	control = *controlp;
2614 	*controlp = NULL;
2615 	initial_controlp = controlp;
2616 	for (clen = control->m_len, cm = mtod(control, struct cmsghdr *),
2617 	    data = CMSG_DATA(cm);
2618 
2619 	    clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET &&
2620 	    clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) &&
2621 	    (char *)cm + cm->cmsg_len >= (char *)data;
2622 
2623 	    clen -= min(CMSG_SPACE(datalen), clen),
2624 	    cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)),
2625 	    data = CMSG_DATA(cm)) {
2626 		datalen = (char *)cm + cm->cmsg_len - (char *)data;
2627 		switch (cm->cmsg_type) {
2628 		case SCM_CREDS:
2629 			*controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
2630 			    SCM_CREDS, SOL_SOCKET, M_WAITOK);
2631 			cmcred = (struct cmsgcred *)
2632 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2633 			cmcred->cmcred_pid = p->p_pid;
2634 			cmcred->cmcred_uid = td->td_ucred->cr_ruid;
2635 			cmcred->cmcred_gid = td->td_ucred->cr_rgid;
2636 			cmcred->cmcred_euid = td->td_ucred->cr_uid;
2637 			cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
2638 			    CMGROUP_MAX);
2639 			for (i = 0; i < cmcred->cmcred_ngroups; i++)
2640 				cmcred->cmcred_groups[i] =
2641 				    td->td_ucred->cr_groups[i];
2642 			break;
2643 
2644 		case SCM_RIGHTS:
2645 			oldfds = datalen / sizeof (int);
2646 			if (oldfds == 0)
2647 				continue;
2648 			/*
2649 			 * On some machines a pointer is bigger than an int, so
2650 			 * we need to check whether the data fits into a single
2651 			 * mbuf.  We could allocate several mbufs, and
2652 			 * unp_externalize() should even handle that properly, but
2653 			 * it is not worth complicating the code for the insane
2654 			 * scenario of passing over 200 file descriptors at once.
2655 			 */
2656 			newlen = oldfds * sizeof(fdep[0]);
2657 			if (CMSG_SPACE(newlen) > MCLBYTES) {
2658 				error = EMSGSIZE;
2659 				goto out;
2660 			}
2661 			/*
2662 			 * Check that all the FDs passed in refer to legal
2663 			 * files.  If not, reject the entire operation.
2664 			 */
2665 			fdp = data;
2666 			FILEDESC_SLOCK(fdesc);
2667 			for (i = 0; i < oldfds; i++, fdp++) {
2668 				fp = fget_noref(fdesc, *fdp);
2669 				if (fp == NULL) {
2670 					FILEDESC_SUNLOCK(fdesc);
2671 					error = EBADF;
2672 					goto out;
2673 				}
2674 				if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) {
2675 					FILEDESC_SUNLOCK(fdesc);
2676 					error = EOPNOTSUPP;
2677 					goto out;
2678 				}
2679 			}
2680 
2681 			/*
2682 			 * Now replace the integer FDs with pointers to the
2683 			 * file structure and capability rights.
2684 			 */
2685 			*controlp = sbcreatecontrol(NULL, newlen,
2686 			    SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2687 			fdp = data;
2688 			for (i = 0; i < oldfds; i++, fdp++) {
2689 				if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) {
2690 					fdp = data;
2691 					for (j = 0; j < i; j++, fdp++) {
2692 						fdrop(fdesc->fd_ofiles[*fdp].
2693 						    fde_file, td);
2694 					}
2695 					FILEDESC_SUNLOCK(fdesc);
2696 					error = EBADF;
2697 					goto out;
2698 				}
2699 			}
2700 			fdp = data;
2701 			fdep = (struct filedescent **)
2702 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2703 			fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS,
2704 			    M_WAITOK);
2705 			for (i = 0; i < oldfds; i++, fdev++, fdp++) {
2706 				fde = &fdesc->fd_ofiles[*fdp];
2707 				fdep[i] = fdev;
2708 				fdep[i]->fde_file = fde->fde_file;
2709 				filecaps_copy(&fde->fde_caps,
2710 				    &fdep[i]->fde_caps, true);
2711 				unp_internalize_fp(fdep[i]->fde_file);
2712 			}
2713 			FILEDESC_SUNLOCK(fdesc);
2714 			break;
2715 
2716 		case SCM_TIMESTAMP:
2717 			*controlp = sbcreatecontrol(NULL, sizeof(*tv),
2718 			    SCM_TIMESTAMP, SOL_SOCKET, M_WAITOK);
2719 			tv = (struct timeval *)
2720 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2721 			microtime(tv);
2722 			break;
2723 
2724 		case SCM_BINTIME:
2725 			*controlp = sbcreatecontrol(NULL, sizeof(*bt),
2726 			    SCM_BINTIME, SOL_SOCKET, M_WAITOK);
2727 			bt = (struct bintime *)
2728 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2729 			bintime(bt);
2730 			break;
2731 
2732 		case SCM_REALTIME:
2733 			*controlp = sbcreatecontrol(NULL, sizeof(*ts),
2734 			    SCM_REALTIME, SOL_SOCKET, M_WAITOK);
2735 			ts = (struct timespec *)
2736 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2737 			nanotime(ts);
2738 			break;
2739 
2740 		case SCM_MONOTONIC:
2741 			*controlp = sbcreatecontrol(NULL, sizeof(*ts),
2742 			    SCM_MONOTONIC, SOL_SOCKET, M_WAITOK);
2743 			ts = (struct timespec *)
2744 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2745 			nanouptime(ts);
2746 			break;
2747 
2748 		default:
2749 			error = EINVAL;
2750 			goto out;
2751 		}
2752 
2753 		if (space != NULL) {
2754 			*space += (*controlp)->m_len;
2755 			*mbcnt += MSIZE;
2756 			if ((*controlp)->m_flags & M_EXT)
2757 				*mbcnt += (*controlp)->m_ext.ext_size;
2758 			*clast = *controlp;
2759 		}
2760 		controlp = &(*controlp)->m_next;
2761 	}
2762 	if (clen > 0)
2763 		error = EINVAL;
2764 
2765 out:
2766 	if (error != 0 && initial_controlp != NULL)
2767 		unp_internalize_cleanup_rights(*initial_controlp);
2768 	m_freem(control);
2769 	return (error);
2770 }
2771 
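/*
 * Example (userland sketch; "s" and "fd" are illustrative): sending a
 * descriptor, which unp_internalize() above converts from an integer
 * into a filedescent pointer for transit:
 *
 *	union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } cm;
 *	char byte = 0;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cm.buf, .msg_controllen = sizeof(cm.buf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	c->cmsg_len = CMSG_LEN(sizeof(int));
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type = SCM_RIGHTS;
 *	memcpy(CMSG_DATA(c), &fd, sizeof(fd));
 *	(void)sendmsg(s, &msg, 0);
 */
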
2772 static struct mbuf *
2773 unp_addsockcred(struct thread *td, struct mbuf *control, int mode,
2774     struct mbuf **clast, u_int *space, u_int *mbcnt)
2775 {
2776 	struct mbuf *m, *n, *n_prev;
2777 	const struct cmsghdr *cm;
2778 	int ngroups, i, cmsgtype;
2779 	size_t ctrlsz;
2780 
2781 	ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX);
2782 	if (mode & UNP_WANTCRED_ALWAYS) {
2783 		ctrlsz = SOCKCRED2SIZE(ngroups);
2784 		cmsgtype = SCM_CREDS2;
2785 	} else {
2786 		ctrlsz = SOCKCREDSIZE(ngroups);
2787 		cmsgtype = SCM_CREDS;
2788 	}
2789 
2790 	m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT);
2791 	if (m == NULL)
2792 		return (control);
2793 	MPASS((m->m_flags & M_EXT) == 0 && m->m_next == NULL);
2794 
2795 	if (mode & UNP_WANTCRED_ALWAYS) {
2796 		struct sockcred2 *sc;
2797 
2798 		sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2799 		sc->sc_version = 0;
2800 		sc->sc_pid = td->td_proc->p_pid;
2801 		sc->sc_uid = td->td_ucred->cr_ruid;
2802 		sc->sc_euid = td->td_ucred->cr_uid;
2803 		sc->sc_gid = td->td_ucred->cr_rgid;
2804 		sc->sc_egid = td->td_ucred->cr_gid;
2805 		sc->sc_ngroups = ngroups;
2806 		for (i = 0; i < sc->sc_ngroups; i++)
2807 			sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2808 	} else {
2809 		struct sockcred *sc;
2810 
2811 		sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2812 		sc->sc_uid = td->td_ucred->cr_ruid;
2813 		sc->sc_euid = td->td_ucred->cr_uid;
2814 		sc->sc_gid = td->td_ucred->cr_rgid;
2815 		sc->sc_egid = td->td_ucred->cr_gid;
2816 		sc->sc_ngroups = ngroups;
2817 		for (i = 0; i < sc->sc_ngroups; i++)
2818 			sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2819 	}
2820 
2821 	/*
2822 	 * Unlink SCM_CREDS control messages (struct cmsgcred), since the
2823 	 * just-created SCM_CREDS control message (struct sockcred) has a
2824 	 * different format.
2825 	 */
2826 	if (control != NULL && cmsgtype == SCM_CREDS)
2827 		for (n = control, n_prev = NULL; n != NULL;) {
2828 			cm = mtod(n, struct cmsghdr *);
2829     			if (cm->cmsg_level == SOL_SOCKET &&
2830 			if (cm->cmsg_level == SOL_SOCKET &&
2831 			    cm->cmsg_type == SCM_CREDS) {
2832 				if (n_prev == NULL)
2833 				else
2834 					n_prev->m_next = n->m_next;
2835 				if (space != NULL) {
2836 					MPASS(*space >= n->m_len);
2837 					*space -= n->m_len;
2838 					MPASS(*mbcnt >= MSIZE);
2839 					*mbcnt -= MSIZE;
2840 					if (n->m_flags & M_EXT) {
2841 						MPASS(*mbcnt >=
2842 						    n->m_ext.ext_size);
2843 						*mbcnt -= n->m_ext.ext_size;
2844 					}
2845 					MPASS(clast);
2846 					if (*clast == n) {
2847 						MPASS(n->m_next == NULL);
2848 						if (n_prev == NULL)
2849 							*clast = m;
2850 						else
2851 							*clast = n_prev;
2852 					}
2853 				}
2854 				n = m_free(n);
2855 			} else {
2856 				n_prev = n;
2857 				n = n->m_next;
2858 			}
2859 		}
2860 
2861 	/* Prepend it to the head. */
2862 	m->m_next = control;
2863 	if (space != NULL) {
2864 		*space += m->m_len;
2865 		*mbcnt += MSIZE;
2866 		if (control == NULL)
2867 			*clast = m;
2868 	}
2869 	return (m);
2870 }
2871 
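/*
 * Example (userland sketch; "s" is illustrative): a receiver opts into
 * the credentials that unp_addsockcred() above attaches, either one-shot
 * (LOCAL_CREDS, SCM_CREDS) or on every message (LOCAL_CREDS_PERSISTENT,
 * SCM_CREDS2):
 *
 *	int on = 1;
 *	(void)setsockopt(s, SOL_LOCAL, LOCAL_CREDS_PERSISTENT, &on,
 *	    sizeof(on));
 *	... subsequent recvmsg(2) calls carry a struct sockcred2 cmsg ...
 */
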
2872 static struct unpcb *
2873 fptounp(struct file *fp)
2874 {
2875 	struct socket *so;
2876 
2877 	if (fp->f_type != DTYPE_SOCKET)
2878 		return (NULL);
2879 	if ((so = fp->f_data) == NULL)
2880 		return (NULL);
2881 	if (so->so_proto->pr_domain != &localdomain)
2882 		return (NULL);
2883 	return sotounpcb(so);
2884 }
2885 
2886 static void
2887 unp_discard(struct file *fp)
2888 {
2889 	struct unp_defer *dr;
2890 
2891 	if (unp_externalize_fp(fp)) {
2892 		dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK);
2893 		dr->ud_fp = fp;
2894 		UNP_DEFERRED_LOCK();
2895 		SLIST_INSERT_HEAD(&unp_defers, dr, ud_link);
2896 		UNP_DEFERRED_UNLOCK();
2897 		atomic_add_int(&unp_defers_count, 1);
2898 		taskqueue_enqueue(taskqueue_thread, &unp_defer_task);
2899 	} else
2900 		closef_nothread(fp);
2901 }
2902 
2903 static void
2904 unp_process_defers(void *arg __unused, int pending)
2905 {
2906 	struct unp_defer *dr;
2907 	SLIST_HEAD(, unp_defer) drl;
2908 	int count;
2909 
2910 	SLIST_INIT(&drl);
2911 	for (;;) {
2912 		UNP_DEFERRED_LOCK();
2913 		if (SLIST_FIRST(&unp_defers) == NULL) {
2914 			UNP_DEFERRED_UNLOCK();
2915 			break;
2916 		}
2917 		SLIST_SWAP(&unp_defers, &drl, unp_defer);
2918 		UNP_DEFERRED_UNLOCK();
2919 		count = 0;
2920 		while ((dr = SLIST_FIRST(&drl)) != NULL) {
2921 			SLIST_REMOVE_HEAD(&drl, ud_link);
2922 			closef_nothread(dr->ud_fp);
2923 			free(dr, M_TEMP);
2924 			count++;
2925 		}
2926 		atomic_add_int(&unp_defers_count, -count);
2927 	}
2928 }
2929 
2930 static void
2931 unp_internalize_fp(struct file *fp)
2932 {
2933 	struct unpcb *unp;
2934 
2935 	UNP_LINK_WLOCK();
2936 	if ((unp = fptounp(fp)) != NULL) {
2937 		unp->unp_file = fp;
2938 		unp->unp_msgcount++;
2939 	}
2940 	unp_rights++;
2941 	UNP_LINK_WUNLOCK();
2942 }
2943 
2944 static int
2945 unp_externalize_fp(struct file *fp)
2946 {
2947 	struct unpcb *unp;
2948 	int ret;
2949 
2950 	UNP_LINK_WLOCK();
2951 	if ((unp = fptounp(fp)) != NULL) {
2952 		unp->unp_msgcount--;
2953 		ret = 1;
2954 	} else
2955 		ret = 0;
2956 	unp_rights--;
2957 	UNP_LINK_WUNLOCK();
2958 	return (ret);
2959 }
2960 
2961 /*
2962  * unp_marked indicates whether additional work has been deferred for a
2963  * future pass through unp_gc().  It is thread local and does not require
2964  * explicit synchronization.
2965  */
2966 static int	unp_marked;
2967 
2968 static void
2969 unp_remove_dead_ref(struct filedescent **fdep, int fdcount)
2970 {
2971 	struct unpcb *unp;
2972 	struct file *fp;
2973 	int i;
2974 
2975 	/*
2976 	 * This function can only be called from the gc task.
2977 	 */
2978 	KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
2979 	    ("%s: not on gc callout", __func__));
2980 	UNP_LINK_LOCK_ASSERT();
2981 
2982 	for (i = 0; i < fdcount; i++) {
2983 		fp = fdep[i]->fde_file;
2984 		if ((unp = fptounp(fp)) == NULL)
2985 			continue;
2986 		if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
2987 			continue;
2988 		unp->unp_gcrefs--;
2989 	}
2990 }
2991 
2992 static void
2993 unp_restore_undead_ref(struct filedescent **fdep, int fdcount)
2994 {
2995 	struct unpcb *unp;
2996 	struct file *fp;
2997 	int i;
2998 
2999 	/*
3000 	 * This function can only be called from the gc task.
3001 	 */
3002 	KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
3003 	    ("%s: not on gc callout", __func__));
3004 	UNP_LINK_LOCK_ASSERT();
3005 
3006 	for (i = 0; i < fdcount; i++) {
3007 		fp = fdep[i]->fde_file;
3008 		if ((unp = fptounp(fp)) == NULL)
3009 			continue;
3010 		if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
3011 			continue;
3012 		unp->unp_gcrefs++;
3013 		unp_marked++;
3014 	}
3015 }
3016 
3017 static void
3018 unp_scan_socket(struct socket *so, void (*op)(struct filedescent **, int))
3019 {
3020 	struct sockbuf *sb;
3021 
3022 	SOCK_LOCK_ASSERT(so);
3023 
3024 	if (sotounpcb(so)->unp_gcflag & UNPGC_IGNORE_RIGHTS)
3025 		return;
3026 
3027 	SOCK_RECVBUF_LOCK(so);
3028 	switch (so->so_type) {
3029 	case SOCK_DGRAM:
3030 		unp_scan(STAILQ_FIRST(&so->so_rcv.uxdg_mb), op);
3031 		unp_scan(so->so_rcv.uxdg_peeked, op);
3032 		TAILQ_FOREACH(sb, &so->so_rcv.uxdg_conns, uxdg_clist)
3033 			unp_scan(STAILQ_FIRST(&sb->uxdg_mb), op);
3034 		break;
3035 	case SOCK_STREAM:
3036 	case SOCK_SEQPACKET:
3037 		unp_scan(so->so_rcv.sb_mb, op);
3038 		break;
3039 	}
3040 	SOCK_RECVBUF_UNLOCK(so);
3041 }
3042 
3043 static void
3044 unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int))
3045 {
3046 	struct socket *so, *soa;
3047 
3048 	so = unp->unp_socket;
3049 	SOCK_LOCK(so);
3050 	if (SOLISTENING(so)) {
3051 		/*
3052 		 * Mark all sockets in our accept queue.
3053 		 */
3054 		TAILQ_FOREACH(soa, &so->sol_comp, so_list)
3055 			unp_scan_socket(soa, op);
3056 	} else {
3057 		/*
3058 		 * Mark all sockets we reference with RIGHTS.
3059 		 */
3060 		unp_scan_socket(so, op);
3061 	}
3062 	SOCK_UNLOCK(so);
3063 }
3064 
3065 static int unp_recycled;
3066 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0,
3067     "Number of unreachable sockets claimed by the garbage collector.");
3068 
3069 static int unp_taskcount;
3070 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0,
3071     "Number of times the garbage collector has run.");
3072 
3073 SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0,
3074     "Number of active local sockets.");
3075 
3076 static void
3077 unp_gc(__unused void *arg, int pending)
3078 {
3079 	struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead,
3080 				    NULL };
3081 	struct unp_head **head;
3082 	struct unp_head unp_deadhead;	/* List of potentially-dead sockets. */
3083 	struct file *f, **unref;
3084 	struct unpcb *unp, *unptmp;
3085 	int i, total, unp_unreachable;
3086 
3087 	LIST_INIT(&unp_deadhead);
3088 	unp_taskcount++;
3089 	UNP_LINK_RLOCK();
3090 	/*
3091 	 * First determine which sockets may be in cycles.
3092 	 */
3093 	unp_unreachable = 0;
3094 
3095 	for (head = heads; *head != NULL; head++)
3096 		LIST_FOREACH(unp, *head, unp_link) {
3097 			KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0,
3098 			    ("%s: unp %p has unexpected gc flags 0x%x",
3099 			    __func__, unp, (unsigned int)unp->unp_gcflag));
3100 
3101 			f = unp->unp_file;
3102 
3103 			/*
3104 			 * Check for an unreachable socket potentially in a
3105 			 * cycle.  It must be in a queue as indicated by
3106 			 * msgcount, and this must equal the file reference
3107 			 * count.  Note that when msgcount is 0 the file is
3108 			 * NULL.
3109 			 */
3110 			if (f != NULL && unp->unp_msgcount != 0 &&
3111 			    refcount_load(&f->f_count) == unp->unp_msgcount) {
3112 				LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead);
3113 				unp->unp_gcflag |= UNPGC_DEAD;
3114 				unp->unp_gcrefs = unp->unp_msgcount;
3115 				unp_unreachable++;
3116 			}
3117 		}
3118 
3119 	/*
3120 	 * Scan all sockets previously marked as potentially being in a cycle
3121 	 * and remove the references each socket holds on any UNPGC_DEAD
3122 	 * sockets in its queue.  After this step, all remaining references on
3123 	 * sockets marked UNPGC_DEAD should not be part of any cycle.
3124 	 */
3125 	LIST_FOREACH(unp, &unp_deadhead, unp_dead)
3126 		unp_gc_scan(unp, unp_remove_dead_ref);
3127 
3128 	/*
3129 	 * If a socket still has a positive refcount, it cannot be in a
3130 	 * cycle.  In this case increment the refcount of all children iteratively.
3131 	 * Stop the scan once we do a complete loop without discovering
3132 	 * a new reachable socket.
3133 	 */
3134 	do {
3135 		unp_marked = 0;
3136 		LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp)
3137 			if (unp->unp_gcrefs > 0) {
3138 				unp->unp_gcflag &= ~UNPGC_DEAD;
3139 				LIST_REMOVE(unp, unp_dead);
3140 				KASSERT(unp_unreachable > 0,
3141 				    ("%s: unp_unreachable underflow.",
3142 				    __func__));
3143 				unp_unreachable--;
3144 				unp_gc_scan(unp, unp_restore_undead_ref);
3145 			}
3146 	} while (unp_marked);
3147 
3148 	UNP_LINK_RUNLOCK();
3149 
3150 	if (unp_unreachable == 0)
3151 		return;
3152 
3153 	/*
3154 	 * Allocate space for a local array of dead unpcbs.
3155 	 * TODO: can this path be simplified by instead using the local
3156 	 * dead list at unp_deadhead, after taking out references
3157 	 * on the file object and/or unpcb and dropping the link lock?
3158 	 */
3159 	unref = malloc(unp_unreachable * sizeof(struct file *),
3160 	    M_TEMP, M_WAITOK);
3161 
3162 	/*
3163 	 * Iterate looking for sockets which have been specifically marked
3164 	 * as unreachable and store them locally.
3165 	 */
3166 	UNP_LINK_RLOCK();
3167 	total = 0;
3168 	LIST_FOREACH(unp, &unp_deadhead, unp_dead) {
3169 		KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0,
3170 		    ("%s: unp %p not marked UNPGC_DEAD", __func__, unp));
3171 		unp->unp_gcflag &= ~UNPGC_DEAD;
3172 		f = unp->unp_file;
3173 		if (unp->unp_msgcount == 0 || f == NULL ||
3174 		    refcount_load(&f->f_count) != unp->unp_msgcount ||
3175 		    !fhold(f))
3176 			continue;
3177 		unref[total++] = f;
3178 		KASSERT(total <= unp_unreachable,
3179 		    ("%s: incorrect unreachable count.", __func__));
3180 	}
3181 	UNP_LINK_RUNLOCK();
3182 
3183 	/*
3184  * Now flush all sockets, freeing rights.  This will free the
3185 	 * struct files associated with these sockets but leave each socket
3186 	 * with one remaining ref.
3187 	 */
3188 	for (i = 0; i < total; i++) {
3189 		struct socket *so;
3190 
3191 		so = unref[i]->f_data;
3192 		CURVNET_SET(so->so_vnet);
3193 		socantrcvmore(so);
3194 		unp_dispose(so);
3195 		CURVNET_RESTORE();
3196 	}
3197 
3198 	/*
3199 	 * And finally release the sockets so they can be reclaimed.
3200 	 */
3201 	for (i = 0; i < total; i++)
3202 		fdrop(unref[i], NULL);
3203 	unp_recycled += total;
3204 	free(unref, M_TEMP);
3205 }
3206 
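/*
 * Example (userland sketch): the kind of cycle unp_gc() above reclaims;
 * an endpoint is sent over itself via SCM_RIGHTS and both descriptors
 * are closed, so the only remaining reference is the in-flight one:
 *
 *	int sv[2];
 *	(void)socketpair(PF_LOCAL, SOCK_STREAM, 0, sv);
 *	... sendmsg(2) on sv[0] carrying sv[1] as SCM_RIGHTS ...
 *	close(sv[0]);
 *	close(sv[1]);
 *	(the unread control message now holds the last reference)
 */
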
3207 /*
3208  * Synchronize against unp_gc, which can trip over data as we are freeing it.
3209  */
3210 static void
3211 unp_dispose(struct socket *so)
3212 {
3213 	struct sockbuf *sb;
3214 	struct unpcb *unp;
3215 	struct mbuf *m;
3216 	int error __diagused;
3217 
3218 	MPASS(!SOLISTENING(so));
3219 
3220 	unp = sotounpcb(so);
3221 	UNP_LINK_WLOCK();
3222 	unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS;
3223 	UNP_LINK_WUNLOCK();
3224 
3225 	/*
3226 	 * Grab our special mbufs before calling sbrelease().
3227 	 */
3228 	error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
3229 	MPASS(!error);
3230 	SOCK_RECVBUF_LOCK(so);
3231 	switch (so->so_type) {
3232 	case SOCK_DGRAM:
3233 		while ((sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) != NULL) {
3234 			STAILQ_CONCAT(&so->so_rcv.uxdg_mb, &sb->uxdg_mb);
3235 			TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist);
3236 			/* Note: socket of sb may reconnect. */
3237 			sb->uxdg_cc = sb->uxdg_ctl = sb->uxdg_mbcnt = 0;
3238 		}
3239 		sb = &so->so_rcv;
3240 		if (sb->uxdg_peeked != NULL) {
3241 			STAILQ_INSERT_HEAD(&sb->uxdg_mb, sb->uxdg_peeked,
3242 			    m_stailqpkt);
3243 			sb->uxdg_peeked = NULL;
3244 		}
3245 		m = STAILQ_FIRST(&sb->uxdg_mb);
3246 		STAILQ_INIT(&sb->uxdg_mb);
3247 		/* XXX: our shortened sbrelease() */
3248 		(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
3249 		    RLIM_INFINITY);
3250 		/*
3251 		 * XXXGL: Mark sb with SBS_CANTRCVMORE.  This is needed to
3252 		 * prevent uipc_sosend_dgram() or unp_disconnect() from adding
3253 		 * more data to the socket.
3254 		 * We came here either through shutdown(2) or from the final
3255 		 * sofree().  The sofree() case is simple as it guarantees
3256 		 * that no more sends will happen; however, we can race with
3257 		 * unp_disconnect() from our peer.  The shutdown(2) case is
3258 		 * more exotic.  It would call into unp_dispose() only if the
3259 		 * socket is SS_ISCONNECTED.  This is possible if we did
3260 		 * connect(2) on this socket and also had it bound with
3261 		 * bind(2), receiving connections from other sockets.
3262 		 * Because uipc_shutdown() violates POSIX (see the comment
3263 		 * there) we will end up here shutting down our receive side.
3264 		 * Of course this will affect not only the peer we
3265 		 * connect(2)ed to, but also all of the peers that had
3266 		 * connect(2)ed to us.  Their sends would end up with ENOBUFS.
3267 		 */
3268 		sb->sb_state |= SBS_CANTRCVMORE;
3269 		break;
3270 	case SOCK_STREAM:
3271 	case SOCK_SEQPACKET:
3272 		sb = &so->so_rcv;
3273 		m = sbcut_locked(sb, sb->sb_ccc);
3274 		KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
3275 		    ("%s: ccc %u mb %p mbcnt %u", __func__,
3276 		    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
3277 		sbrelease_locked(so, SO_RCV);
3278 		break;
3279 	}
3280 	SOCK_RECVBUF_UNLOCK(so);
3281 	SOCK_IO_RECV_UNLOCK(so);
3282 
3283 	if (m != NULL) {
3284 		unp_scan(m, unp_freerights);
3285 		m_freemp(m);
3286 	}
3287 }
3288 
3289 static void
3290 unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int))
3291 {
3292 	struct mbuf *m;
3293 	struct cmsghdr *cm;
3294 	void *data;
3295 	socklen_t clen, datalen;
3296 
3297 	while (m0 != NULL) {
3298 		for (m = m0; m; m = m->m_next) {
3299 			if (m->m_type != MT_CONTROL)
3300 				continue;
3301 
3302 			cm = mtod(m, struct cmsghdr *);
3303 			clen = m->m_len;
3304 
3305 			while (cm != NULL) {
3306 				if (sizeof(*cm) > clen || cm->cmsg_len > clen)
3307 					break;
3308 
3309 				data = CMSG_DATA(cm);
3310 				datalen = (caddr_t)cm + cm->cmsg_len
3311 				    - (caddr_t)data;
3312 
3313 				if (cm->cmsg_level == SOL_SOCKET &&
3314 				    cm->cmsg_type == SCM_RIGHTS) {
3315 					(*op)(data, datalen /
3316 					    sizeof(struct filedescent *));
3317 				}
3318 
3319 				if (CMSG_SPACE(datalen) < clen) {
3320 					clen -= CMSG_SPACE(datalen);
3321 					cm = (struct cmsghdr *)
3322 					    ((caddr_t)cm + CMSG_SPACE(datalen));
3323 				} else {
3324 					clen = 0;
3325 					cm = NULL;
3326 				}
3327 			}
3328 		}
3329 		m0 = m0->m_nextpkt;
3330 	}
3331 }
3332 
3333 /*
3334  * Definitions of protocols supported in the LOCAL domain.
3335  */
3336 static struct protosw streamproto = {
3337 	.pr_type =		SOCK_STREAM,
3338 	.pr_flags =		PR_CONNREQUIRED | PR_WANTRCVD | PR_CAPATTACH,
3339 	.pr_ctloutput =		&uipc_ctloutput,
3340 	.pr_abort = 		uipc_abort,
3341 	.pr_accept =		uipc_peeraddr,
3342 	.pr_attach =		uipc_attach,
3343 	.pr_bind =		uipc_bind,
3344 	.pr_bindat =		uipc_bindat,
3345 	.pr_connect =		uipc_connect,
3346 	.pr_connectat =		uipc_connectat,
3347 	.pr_connect2 =		uipc_connect2,
3348 	.pr_detach =		uipc_detach,
3349 	.pr_disconnect =	uipc_disconnect,
3350 	.pr_listen =		uipc_listen,
3351 	.pr_peeraddr =		uipc_peeraddr,
3352 	.pr_rcvd =		uipc_rcvd,
3353 	.pr_send =		uipc_send,
3354 	.pr_ready =		uipc_ready,
3355 	.pr_sense =		uipc_sense,
3356 	.pr_shutdown =		uipc_shutdown,
3357 	.pr_sockaddr =		uipc_sockaddr,
3358 	.pr_soreceive =		soreceive_generic,
3359 	.pr_close =		uipc_close,
3360 };
3361 
3362 static struct protosw dgramproto = {
3363 	.pr_type =		SOCK_DGRAM,
3364 	.pr_flags =		PR_ATOMIC | PR_ADDR | PR_CAPATTACH | PR_SOCKBUF,
3365 	.pr_ctloutput =		&uipc_ctloutput,
3366 	.pr_abort = 		uipc_abort,
3367 	.pr_accept =		uipc_peeraddr,
3368 	.pr_attach =		uipc_attach,
3369 	.pr_bind =		uipc_bind,
3370 	.pr_bindat =		uipc_bindat,
3371 	.pr_connect =		uipc_connect,
3372 	.pr_connectat =		uipc_connectat,
3373 	.pr_connect2 =		uipc_connect2,
3374 	.pr_detach =		uipc_detach,
3375 	.pr_disconnect =	uipc_disconnect,
3376 	.pr_peeraddr =		uipc_peeraddr,
3377 	.pr_sosend =		uipc_sosend_dgram,
3378 	.pr_sense =		uipc_sense,
3379 	.pr_shutdown =		uipc_shutdown,
3380 	.pr_sockaddr =		uipc_sockaddr,
3381 	.pr_soreceive =		uipc_soreceive_dgram,
3382 	.pr_close =		uipc_close,
3383 };
3384 
3385 static struct protosw seqpacketproto = {
3386 	.pr_type =		SOCK_SEQPACKET,
3387 	/*
3388 	 * XXXRW: For now, PR_ADDR because soreceive will bump into them
3389 	 * due to our use of sbappendaddr.  A new sbappend variant is needed
3390 	 * that supports both atomic record writes and control data.
3391 	 */
3392 	.pr_flags =		PR_ADDR | PR_ATOMIC | PR_CONNREQUIRED |
3393 				PR_WANTRCVD | PR_CAPATTACH,
3394 	.pr_ctloutput =		&uipc_ctloutput,
3395 	.pr_abort =		uipc_abort,
3396 	.pr_accept =		uipc_peeraddr,
3397 	.pr_attach =		uipc_attach,
3398 	.pr_bind =		uipc_bind,
3399 	.pr_bindat =		uipc_bindat,
3400 	.pr_connect =		uipc_connect,
3401 	.pr_connectat =		uipc_connectat,
3402 	.pr_connect2 =		uipc_connect2,
3403 	.pr_detach =		uipc_detach,
3404 	.pr_disconnect =	uipc_disconnect,
3405 	.pr_listen =		uipc_listen,
3406 	.pr_peeraddr =		uipc_peeraddr,
3407 	.pr_rcvd =		uipc_rcvd,
3408 	.pr_send =		uipc_send,
3409 	.pr_sense =		uipc_sense,
3410 	.pr_shutdown =		uipc_shutdown,
3411 	.pr_sockaddr =		uipc_sockaddr,
3412 	.pr_soreceive =		soreceive_generic,	/* XXX: or...? */
3413 	.pr_close =		uipc_close,
3414 };
3415 
3416 static struct domain localdomain = {
3417 	.dom_family =		AF_LOCAL,
3418 	.dom_name =		"local",
3419 	.dom_externalize =	unp_externalize,
3420 	.dom_nprotosw =		3,
3421 	.dom_protosw =		{
3422 		&streamproto,
3423 		&dgramproto,
3424 		&seqpacketproto,
3425 	}
3426 };
3427 DOMAIN_SET(local);
3428 
3429 /*
3430  * A helper function called by VFS before socket-type vnode reclamation.
3431  * For an active vnode it clears unp_vnode pointer and decrements unp_vnode
3432  * use count.
3433  */
3434 void
3435 vfs_unp_reclaim(struct vnode *vp)
3436 {
3437 	struct unpcb *unp;
3438 	int active;
3439 	struct mtx *vplock;
3440 
3441 	ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim");
3442 	KASSERT(vp->v_type == VSOCK,
3443 	    ("vfs_unp_reclaim: vp->v_type != VSOCK"));
3444 
3445 	active = 0;
3446 	vplock = mtx_pool_find(unp_vp_mtxpool, vp);
3447 	mtx_lock(vplock);
3448 	VOP_UNP_CONNECT(vp, &unp);
3449 	if (unp == NULL)
3450 		goto done;
3451 	UNP_PCB_LOCK(unp);
3452 	if (unp->unp_vnode == vp) {
3453 		VOP_UNP_DETACH(vp);
3454 		unp->unp_vnode = NULL;
3455 		active = 1;
3456 	}
3457 	UNP_PCB_UNLOCK(unp);
3458  done:
3459 	mtx_unlock(vplock);
3460 	if (active)
3461 		vunref(vp);
3462 }
3463 
3464 #ifdef DDB
3465 static void
3466 db_print_indent(int indent)
3467 {
3468 	int i;
3469 
3470 	for (i = 0; i < indent; i++)
3471 		db_printf(" ");
3472 }
3473 
3474 static void
3475 db_print_unpflags(int unp_flags)
3476 {
3477 	int comma;
3478 
3479 	comma = 0;
3480 	if (unp_flags & UNP_HAVEPC) {
3481 		db_printf("%sUNP_HAVEPC", comma ? ", " : "");
3482 		comma = 1;
3483 	}
3484 	if (unp_flags & UNP_WANTCRED_ALWAYS) {
3485 		db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : "");
3486 		comma = 1;
3487 	}
3488 	if (unp_flags & UNP_WANTCRED_ONESHOT) {
3489 		db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : "");
3490 		comma = 1;
3491 	}
3492 	if (unp_flags & UNP_CONNECTING) {
3493 		db_printf("%sUNP_CONNECTING", comma ? ", " : "");
3494 		comma = 1;
3495 	}
3496 	if (unp_flags & UNP_BINDING) {
3497 		db_printf("%sUNP_BINDING", comma ? ", " : "");
3498 		comma = 1;
3499 	}
3500 }
3501 
3502 static void
3503 db_print_xucred(int indent, struct xucred *xu)
3504 {
3505 	int comma, i;
3506 
3507 	db_print_indent(indent);
3508 	db_printf("cr_version: %u   cr_uid: %u   cr_pid: %d   cr_ngroups: %d\n",
3509 	    xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups);
3510 	db_print_indent(indent);
3511 	db_printf("cr_groups: ");
3512 	comma = 0;
3513 	for (i = 0; i < xu->cr_ngroups; i++) {
3514 		db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
3515 		comma = 1;
3516 	}
3517 	db_printf("\n");
3518 }
3519 
3520 static void
3521 db_print_unprefs(int indent, struct unp_head *uh)
3522 {
3523 	struct unpcb *unp;
3524 	int counter;
3525 
3526 	counter = 0;
3527 	LIST_FOREACH(unp, uh, unp_reflink) {
3528 		if (counter % 4 == 0)
3529 			db_print_indent(indent);
3530 		db_printf("%p  ", unp);
3531 		if (counter % 4 == 3)
3532 			db_printf("\n");
3533 		counter++;
3534 	}
3535 	if (counter != 0 && counter % 4 != 0)
3536 		db_printf("\n");
3537 }
3538 
3539 DB_SHOW_COMMAND(unpcb, db_show_unpcb)
3540 {
3541 	struct unpcb *unp;
3542 
3543 	if (!have_addr) {
3544 		db_printf("usage: show unpcb <addr>\n");
3545 		return;
3546 	}
3547 	unp = (struct unpcb *)addr;
3548 
3549 	db_printf("unp_socket: %p   unp_vnode: %p\n", unp->unp_socket,
3550 	    unp->unp_vnode);
3551 
3552 	db_printf("unp_ino: %ju   unp_conn: %p\n", (uintmax_t)unp->unp_ino,
3553 	    unp->unp_conn);
3554 
3555 	db_printf("unp_refs:\n");
3556 	db_print_unprefs(2, &unp->unp_refs);
3557 
3558 	/* XXXRW: Would be nice to print the full address, if any. */
3559 	db_printf("unp_addr: %p\n", unp->unp_addr);
3560 
3561 	db_printf("unp_gencnt: %llu\n",
3562 	    (unsigned long long)unp->unp_gencnt);
3563 
3564 	db_printf("unp_flags: %x (", unp->unp_flags);
3565 	db_print_unpflags(unp->unp_flags);
3566 	db_printf(")\n");
3567 
3568 	db_printf("unp_peercred:\n");
3569 	db_print_xucred(2, &unp->unp_peercred);
3570 
3571 	db_printf("unp_refcount: %u\n", unp->unp_refcount);
3572 }
3573 #endif
3574