xref: /freebsd/sys/kern/uipc_usrreq.c (revision 924226fba12cc9a228c73b956e1b7fa24c60b055)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1989, 1991, 1993
5  *	The Regents of the University of California. All Rights Reserved.
6  * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved.
7  * Copyright (c) 2018 Matthew Macy
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
34  */
35 
36 /*
37  * UNIX Domain (Local) Sockets
38  *
39  * This is an implementation of UNIX (local) domain sockets.  Each socket has
40  * an associated struct unpcb (UNIX protocol control block).  Stream sockets
41  * may be connected to 0 or 1 other socket.  Datagram sockets may be
42  * connected to 0, 1, or many other sockets.  Sockets may be created and
43  * connected in pairs (socketpair(2)), or bound/connected to using the file
44  * system name space.  For most purposes, only the receive socket buffer is
45  * used, as sending on one socket delivers directly to the receive socket
46  * buffer of a second socket.
47  *
48  * The implementation is substantially complicated by the fact that
49  * "ancillary data", such as file descriptors or credentials, may be passed
50  * across UNIX domain sockets.  The potential for passing UNIX domain sockets
51  * over other UNIX domain sockets requires the implementation of a simple
52  * garbage collector to find and tear down cycles of disconnected sockets.
53  *
54  * TODO:
55  *	RDM
56  *	rethink name space problems
57  *	need a proper out-of-band
58  */
59 
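/*
 * For illustration only: userland typically reaches this code through the
 * ordinary socket calls, e.g. creating a connected stream pair (standard
 * API shown as a sketch, not part of this file):
 *
 *	int fds[2];
 *
 *	if (socketpair(PF_LOCAL, SOCK_STREAM, 0, fds) == -1)
 *		err(1, "socketpair");
 */
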
60 #include <sys/cdefs.h>
61 __FBSDID("$FreeBSD$");
62 
63 #include "opt_ddb.h"
64 
65 #include <sys/param.h>
66 #include <sys/capsicum.h>
67 #include <sys/domain.h>
68 #include <sys/eventhandler.h>
69 #include <sys/fcntl.h>
70 #include <sys/file.h>
71 #include <sys/filedesc.h>
72 #include <sys/kernel.h>
73 #include <sys/lock.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/mount.h>
77 #include <sys/mutex.h>
78 #include <sys/namei.h>
79 #include <sys/proc.h>
80 #include <sys/protosw.h>
81 #include <sys/queue.h>
82 #include <sys/resourcevar.h>
83 #include <sys/rwlock.h>
84 #include <sys/socket.h>
85 #include <sys/socketvar.h>
86 #include <sys/signalvar.h>
87 #include <sys/stat.h>
88 #include <sys/sx.h>
89 #include <sys/sysctl.h>
90 #include <sys/systm.h>
91 #include <sys/taskqueue.h>
92 #include <sys/un.h>
93 #include <sys/unpcb.h>
94 #include <sys/vnode.h>
95 
96 #include <net/vnet.h>
97 
98 #ifdef DDB
99 #include <ddb/ddb.h>
100 #endif
101 
102 #include <security/mac/mac_framework.h>
103 
104 #include <vm/uma.h>
105 
106 MALLOC_DECLARE(M_FILECAPS);
107 
108 /*
109  * See unpcb.h for the locking key.
110  */
111 
112 static uma_zone_t	unp_zone;
113 static unp_gen_t	unp_gencnt;	/* (l) */
114 static u_int		unp_count;	/* (l) Count of local sockets. */
115 static ino_t		unp_ino;	/* Prototype for fake inode numbers. */
116 static int		unp_rights;	/* (g) File descriptors in flight. */
117 static struct unp_head	unp_shead;	/* (l) List of stream sockets. */
118 static struct unp_head	unp_dhead;	/* (l) List of datagram sockets. */
119 static struct unp_head	unp_sphead;	/* (l) List of seqpacket sockets. */
120 
121 struct unp_defer {
122 	SLIST_ENTRY(unp_defer) ud_link;
123 	struct file *ud_fp;
124 };
125 static SLIST_HEAD(, unp_defer) unp_defers;
126 static int unp_defers_count;
127 
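/* Name returned to callers when a socket or its peer has no bound address. */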
128 static const struct sockaddr	sun_noname = { sizeof(sun_noname), AF_LOCAL };
129 
130 /*
131  * Garbage collection of cyclic file descriptor/socket references occurs
132  * asynchronously in a taskqueue context in order to avoid recursion and
133  * reentrance in the UNIX domain socket, file descriptor, and socket layer
134  * code.  See unp_gc() for a full description.
135  */
136 static struct timeout_task unp_gc_task;
137 
138 /*
139  * The close of UNIX domain sockets attached as SCM_RIGHTS is
140  * postponed to the taskqueue, to avoid arbitrary recursion depth.
141  * The attached sockets might themselves have other sockets attached.
142  */
143 static struct task	unp_defer_task;
144 
145 /*
146  * Both the send and receive buffers of a stream socket are allocated
147  * PIPSIZ bytes of buffering, although the total for sender and receiver
148  * together is effectively only PIPSIZ.
149  *
150  * Datagram sockets use the sendspace only as the maximum datagram size,
151  * and do not actually reserve it.  Their recvspace should be
152  * large enough for at least one max-size datagram plus address.
153  */
154 #ifndef PIPSIZ
155 #define	PIPSIZ	8192
156 #endif
157 static u_long	unpst_sendspace = PIPSIZ;
158 static u_long	unpst_recvspace = PIPSIZ;
159 static u_long	unpdg_maxdgram = 2*1024;
160 static u_long	unpdg_recvspace = 16*1024;	/* support 8KB syslog msgs */
161 static u_long	unpsp_sendspace = PIPSIZ;	/* really max datagram size */
162 static u_long	unpsp_recvspace = PIPSIZ;
163 
164 static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
165     "Local domain");
166 static SYSCTL_NODE(_net_local, SOCK_STREAM, stream,
167     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
168     "SOCK_STREAM");
169 static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram,
170     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
171     "SOCK_DGRAM");
172 static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket,
173     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
174     "SOCK_SEQPACKET");
175 
176 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
177 	   &unpst_sendspace, 0, "Default stream send space.");
178 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
179 	   &unpst_recvspace, 0, "Default stream receive space.");
180 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
181 	   &unpdg_maxdgram, 0, "Maximum datagram size.");
182 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
183 	   &unpdg_recvspace, 0, "Default datagram receive space.");
184 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW,
185 	   &unpsp_sendspace, 0, "Default seqpacket send space.");
186 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW,
187 	   &unpsp_recvspace, 0, "Default seqpacket receive space.");
188 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
189     "File descriptors in flight.");
190 SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
191     &unp_defers_count, 0,
192     "File descriptors deferred to taskqueue for close.");
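
/*
 * For example, the limits above can be inspected and tuned at run time with
 * sysctl(8) (the names follow from the declarations above):
 *
 *	sysctl net.local.stream.sendspace
 *	sysctl net.local.dgram.maxdgram=16384
 */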
193 
194 /*
195  * Locking and synchronization:
196  *
197  * Several types of locks exist in the local domain socket implementation:
198  * - a global linkage lock
199  * - a global connection list lock
200  * - the mtxpool lock
201  * - per-unpcb mutexes
202  *
203  * The linkage lock protects the global socket lists, the generation number
204  * counter and garbage collector state.
205  *
206  * The connection list lock protects the list of referring sockets in a datagram
207  * socket PCB.  This lock is also overloaded to protect a global list of
208  * sockets whose buffers contain socket references in the form of SCM_RIGHTS
209  * messages.  To avoid recursion, such references are released by a dedicated
210  * thread.
211  *
212  * The mtxpool lock protects the vnode from being modified while referenced.
213  * Lock ordering rules require that it be acquired before any PCB locks.
214  *
215  * The unpcb lock (unp_mtx) protects the most commonly referenced fields in the
216  * unpcb.  This includes the unp_conn field, which either links two connected
217  * PCBs together (for connected socket types) or points at the destination
218  * socket (for connectionless socket types).  The operations of creating or
219  * destroying a connection therefore involve locking multiple PCBs.  To avoid
220  * lock order reversals, in some cases this involves dropping a PCB lock and
221  * using a reference counter to maintain liveness.
222  *
223  * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
224  * allocated in pru_attach() and freed in pru_detach().  The validity of that
225  * pointer is an invariant, so no lock is required to dereference the so_pcb
226  * pointer if a valid socket reference is held by the caller.  In practice,
227  * this is always true during operations performed on a socket.  Each unpcb
228  * has a back-pointer to its socket, unp_socket, which will be stable under
229  * the same circumstances.
230  *
231  * This pointer may only be safely dereferenced as long as a valid reference
232  * to the unpcb is held.  Typically, this reference will be from the socket,
233  * or from another unpcb when the referring unpcb's lock is held (in order
234  * that the reference not be invalidated during use).  For example, to follow
235  * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee
236  * that detach is not run clearing unp_socket.
237  *
238  * Blocking with UNIX domain sockets is a tricky issue: unlike most network
239  * protocols, bind() is a non-atomic operation, and connect() requires
240  * potential sleeping in the protocol, due to potentially waiting on local or
241  * distributed file systems.  We try to separate "lookup" operations, which
242  * may sleep, and the IPC operations themselves, which typically can occur
243  * with relative atomicity as locks can be held over the entire operation.
244  *
245  * Another tricky issue is simultaneous multi-threaded or multi-process
246  * access to a single UNIX domain socket.  These are handled by the flags
247  * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
248  * binding, both of which involve dropping UNIX domain socket locks in order
249  * to perform namei() and other file system operations.
250  */
251 static struct rwlock	unp_link_rwlock;
252 static struct mtx	unp_defers_lock;
253 
254 #define	UNP_LINK_LOCK_INIT()		rw_init(&unp_link_rwlock,	\
255 					    "unp_link_rwlock")
256 
257 #define	UNP_LINK_LOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
258 					    RA_LOCKED)
259 #define	UNP_LINK_UNLOCK_ASSERT()	rw_assert(&unp_link_rwlock,	\
260 					    RA_UNLOCKED)
261 
262 #define	UNP_LINK_RLOCK()		rw_rlock(&unp_link_rwlock)
263 #define	UNP_LINK_RUNLOCK()		rw_runlock(&unp_link_rwlock)
264 #define	UNP_LINK_WLOCK()		rw_wlock(&unp_link_rwlock)
265 #define	UNP_LINK_WUNLOCK()		rw_wunlock(&unp_link_rwlock)
266 #define	UNP_LINK_WLOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
267 					    RA_WLOCKED)
268 #define	UNP_LINK_WOWNED()		rw_wowned(&unp_link_rwlock)
269 
270 #define	UNP_DEFERRED_LOCK_INIT()	mtx_init(&unp_defers_lock, \
271 					    "unp_defer", NULL, MTX_DEF)
272 #define	UNP_DEFERRED_LOCK()		mtx_lock(&unp_defers_lock)
273 #define	UNP_DEFERRED_UNLOCK()		mtx_unlock(&unp_defers_lock)
274 
275 #define UNP_REF_LIST_LOCK()		UNP_DEFERRED_LOCK();
276 #define UNP_REF_LIST_UNLOCK()		UNP_DEFERRED_UNLOCK();
277 
278 #define UNP_PCB_LOCK_INIT(unp)		mtx_init(&(unp)->unp_mtx,	\
279 					    "unp", "unp",	\
280 					    MTX_DUPOK|MTX_DEF)
281 #define	UNP_PCB_LOCK_DESTROY(unp)	mtx_destroy(&(unp)->unp_mtx)
282 #define	UNP_PCB_LOCKPTR(unp)		(&(unp)->unp_mtx)
283 #define	UNP_PCB_LOCK(unp)		mtx_lock(&(unp)->unp_mtx)
284 #define	UNP_PCB_TRYLOCK(unp)		mtx_trylock(&(unp)->unp_mtx)
285 #define	UNP_PCB_UNLOCK(unp)		mtx_unlock(&(unp)->unp_mtx)
286 #define	UNP_PCB_OWNED(unp)		mtx_owned(&(unp)->unp_mtx)
287 #define	UNP_PCB_LOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_OWNED)
288 #define	UNP_PCB_UNLOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED)
289 
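/*
 * Illustrative sketch of the common pattern for safely following
 * unp->unp_conn->unp_socket using the macros above (this mirrors
 * uipc_close() and uipc_disconnect() below; a sketch, not an
 * additional API):
 *
 *	UNP_PCB_LOCK(unp);
 *	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
 *		so2 = unp2->unp_socket;		(stable while unp2 is locked)
 *		...
 *		unp_pcb_unlock_pair(unp, unp2);
 *	} else
 *		UNP_PCB_UNLOCK(unp);
 */
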
290 static int	uipc_connect2(struct socket *, struct socket *);
291 static int	uipc_ctloutput(struct socket *, struct sockopt *);
292 static int	unp_connect(struct socket *, struct sockaddr *,
293 		    struct thread *);
294 static int	unp_connectat(int, struct socket *, struct sockaddr *,
295 		    struct thread *, bool);
296 static void	unp_connect2(struct socket *so, struct socket *so2, int);
297 static void	unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
298 static void	unp_dispose(struct socket *so);
299 static void	unp_shutdown(struct unpcb *);
300 static void	unp_drop(struct unpcb *);
301 static void	unp_gc(__unused void *, int);
302 static void	unp_scan(struct mbuf *, void (*)(struct filedescent **, int));
303 static void	unp_discard(struct file *);
304 static void	unp_freerights(struct filedescent **, int);
305 static int	unp_internalize(struct mbuf **, struct thread *,
306 		    struct mbuf **, u_int *, u_int *);
307 static void	unp_internalize_fp(struct file *);
308 static int	unp_externalize(struct mbuf *, struct mbuf **, int);
309 static int	unp_externalize_fp(struct file *);
310 static struct mbuf	*unp_addsockcred(struct thread *, struct mbuf *,
311 		    int, struct mbuf **, u_int *, u_int *);
312 static void	unp_process_defers(void * __unused, int);
313 
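/*
 * PCB reference count helpers: unp_pcb_hold() takes an additional reference
 * on an unpcb that is known to have at least one, while unp_pcb_rele()
 * drops a reference and, when it is the last one, unlocks, destroys and
 * frees the PCB, returning true.
 */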
314 static void
315 unp_pcb_hold(struct unpcb *unp)
316 {
317 	u_int old __unused;
318 
319 	old = refcount_acquire(&unp->unp_refcount);
320 	KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp));
321 }
322 
323 static __result_use_check bool
324 unp_pcb_rele(struct unpcb *unp)
325 {
326 	bool ret;
327 
328 	UNP_PCB_LOCK_ASSERT(unp);
329 
330 	if ((ret = refcount_release(&unp->unp_refcount))) {
331 		UNP_PCB_UNLOCK(unp);
332 		UNP_PCB_LOCK_DESTROY(unp);
333 		uma_zfree(unp_zone, unp);
334 	}
335 	return (ret);
336 }
337 
338 static void
339 unp_pcb_rele_notlast(struct unpcb *unp)
340 {
341 	bool ret __unused;
342 
343 	ret = refcount_release(&unp->unp_refcount);
344 	KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp));
345 }
346 
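/*
 * Lock two PCBs in a consistent (address) order so that code paths locking
 * both sides of a connection cannot deadlock.  A socket connected to itself
 * is locked only once.
 */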
347 static void
348 unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2)
349 {
350 	UNP_PCB_UNLOCK_ASSERT(unp);
351 	UNP_PCB_UNLOCK_ASSERT(unp2);
352 
353 	if (unp == unp2) {
354 		UNP_PCB_LOCK(unp);
355 	} else if ((uintptr_t)unp2 > (uintptr_t)unp) {
356 		UNP_PCB_LOCK(unp);
357 		UNP_PCB_LOCK(unp2);
358 	} else {
359 		UNP_PCB_LOCK(unp2);
360 		UNP_PCB_LOCK(unp);
361 	}
362 }
363 
364 static void
365 unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2)
366 {
367 	UNP_PCB_UNLOCK(unp);
368 	if (unp != unp2)
369 		UNP_PCB_UNLOCK(unp2);
370 }
371 
372 /*
373  * Try to lock the connected peer of an already locked socket.  In some cases
374  * this requires that we unlock the current socket.  The pairbusy counter is
375  * used to block concurrent connection attempts while the lock is dropped.  The
376  * caller must be careful to revalidate PCB state.
377  */
378 static struct unpcb *
379 unp_pcb_lock_peer(struct unpcb *unp)
380 {
381 	struct unpcb *unp2;
382 
383 	UNP_PCB_LOCK_ASSERT(unp);
384 	unp2 = unp->unp_conn;
385 	if (unp2 == NULL)
386 		return (NULL);
387 	if (__predict_false(unp == unp2))
388 		return (unp);
389 
390 	UNP_PCB_UNLOCK_ASSERT(unp2);
391 
392 	if (__predict_true(UNP_PCB_TRYLOCK(unp2)))
393 		return (unp2);
394 	if ((uintptr_t)unp2 > (uintptr_t)unp) {
395 		UNP_PCB_LOCK(unp2);
396 		return (unp2);
397 	}
398 	unp->unp_pairbusy++;
399 	unp_pcb_hold(unp2);
400 	UNP_PCB_UNLOCK(unp);
401 
402 	UNP_PCB_LOCK(unp2);
403 	UNP_PCB_LOCK(unp);
404 	KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL,
405 	    ("%s: socket %p was reconnected", __func__, unp));
406 	if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) {
407 		unp->unp_flags &= ~UNP_WAITING;
408 		wakeup(unp);
409 	}
410 	if (unp_pcb_rele(unp2)) {
411 		/* unp2 is unlocked. */
412 		return (NULL);
413 	}
414 	if (unp->unp_conn == NULL) {
415 		UNP_PCB_UNLOCK(unp2);
416 		return (NULL);
417 	}
418 	return (unp2);
419 }
420 
421 /*
422  * Definitions of protocols supported in the LOCAL domain.
423  */
424 static struct domain localdomain;
425 static struct pr_usrreqs uipc_usrreqs_dgram, uipc_usrreqs_stream;
426 static struct pr_usrreqs uipc_usrreqs_seqpacket;
427 static struct protosw localsw[] = {
428 {
429 	.pr_type =		SOCK_STREAM,
430 	.pr_domain =		&localdomain,
431 	.pr_flags =		PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS|
432 				    PR_CAPATTACH,
433 	.pr_ctloutput =		&uipc_ctloutput,
434 	.pr_usrreqs =		&uipc_usrreqs_stream
435 },
436 {
437 	.pr_type =		SOCK_DGRAM,
438 	.pr_domain =		&localdomain,
439 	.pr_flags =		PR_ATOMIC | PR_ADDR |PR_RIGHTS | PR_CAPATTACH |
440 				    PR_SOCKBUF,
441 	.pr_ctloutput =		&uipc_ctloutput,
442 	.pr_usrreqs =		&uipc_usrreqs_dgram
443 },
444 {
445 	.pr_type =		SOCK_SEQPACKET,
446 	.pr_domain =		&localdomain,
447 
448 	/*
449 	 * XXXRW: For now, PR_ADDR because soreceive will bump into them
450 	 * due to our use of sbappendaddr.  A new sbappend variant is needed
451 	 * that supports both atomic record writes and control data.
452 	 */
453 	.pr_flags =		PR_ADDR|PR_ATOMIC|PR_CONNREQUIRED|
454 				    PR_WANTRCVD|PR_RIGHTS|PR_CAPATTACH,
455 	.pr_ctloutput =		&uipc_ctloutput,
456 	.pr_usrreqs =		&uipc_usrreqs_seqpacket,
457 },
458 };
459 
460 static struct domain localdomain = {
461 	.dom_family =		AF_LOCAL,
462 	.dom_name =		"local",
463 	.dom_externalize =	unp_externalize,
464 	.dom_dispose =		unp_dispose,
465 	.dom_protosw =		localsw,
466 	.dom_protoswNPROTOSW =	&localsw[nitems(localsw)]
467 };
468 DOMAIN_SET(local);
469 
470 static void
471 uipc_abort(struct socket *so)
472 {
473 	struct unpcb *unp, *unp2;
474 
475 	unp = sotounpcb(so);
476 	KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
477 	UNP_PCB_UNLOCK_ASSERT(unp);
478 
479 	UNP_PCB_LOCK(unp);
480 	unp2 = unp->unp_conn;
481 	if (unp2 != NULL) {
482 		unp_pcb_hold(unp2);
483 		UNP_PCB_UNLOCK(unp);
484 		unp_drop(unp2);
485 	} else
486 		UNP_PCB_UNLOCK(unp);
487 }
488 
489 static int
490 uipc_accept(struct socket *so, struct sockaddr **nam)
491 {
492 	struct unpcb *unp, *unp2;
493 	const struct sockaddr *sa;
494 
495 	/*
496 	 * Pass back name of connected socket, if it was bound and we are
497 	 * still connected (our peer may have closed already!).
498 	 */
499 	unp = sotounpcb(so);
500 	KASSERT(unp != NULL, ("uipc_accept: unp == NULL"));
501 
502 	*nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
503 	UNP_PCB_LOCK(unp);
504 	unp2 = unp_pcb_lock_peer(unp);
505 	if (unp2 != NULL && unp2->unp_addr != NULL)
506 		sa = (struct sockaddr *)unp2->unp_addr;
507 	else
508 		sa = &sun_noname;
509 	bcopy(sa, *nam, sa->sa_len);
510 	if (unp2 != NULL)
511 		unp_pcb_unlock_pair(unp, unp2);
512 	else
513 		UNP_PCB_UNLOCK(unp);
514 	return (0);
515 }
516 
517 static int
518 uipc_attach(struct socket *so, int proto, struct thread *td)
519 {
520 	u_long sendspace, recvspace;
521 	struct unpcb *unp;
522 	int error;
523 	bool locked;
524 
525 	KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
526 	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
527 		switch (so->so_type) {
528 		case SOCK_STREAM:
529 			sendspace = unpst_sendspace;
530 			recvspace = unpst_recvspace;
531 			break;
532 
533 		case SOCK_DGRAM:
534 			STAILQ_INIT(&so->so_rcv.uxdg_mb);
535 			STAILQ_INIT(&so->so_snd.uxdg_mb);
536 			TAILQ_INIT(&so->so_rcv.uxdg_conns);
537 			/*
538 			 * Since the send buffer is either bypassed or is part
539 			 * of a one-to-many receive buffer, we assign both space
540 			 * limits to unpdg_recvspace.
541 			 */
542 			sendspace = recvspace = unpdg_recvspace;
543 			break;
544 
545 		case SOCK_SEQPACKET:
546 			sendspace = unpsp_sendspace;
547 			recvspace = unpsp_recvspace;
548 			break;
549 
550 		default:
551 			panic("uipc_attach");
552 		}
553 		error = soreserve(so, sendspace, recvspace);
554 		if (error)
555 			return (error);
556 	}
557 	unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
558 	if (unp == NULL)
559 		return (ENOBUFS);
560 	LIST_INIT(&unp->unp_refs);
561 	UNP_PCB_LOCK_INIT(unp);
562 	unp->unp_socket = so;
563 	so->so_pcb = unp;
564 	refcount_init(&unp->unp_refcount, 1);
565 
566 	if ((locked = UNP_LINK_WOWNED()) == false)
567 		UNP_LINK_WLOCK();
568 
569 	unp->unp_gencnt = ++unp_gencnt;
570 	unp->unp_ino = ++unp_ino;
571 	unp_count++;
572 	switch (so->so_type) {
573 	case SOCK_STREAM:
574 		LIST_INSERT_HEAD(&unp_shead, unp, unp_link);
575 		break;
576 
577 	case SOCK_DGRAM:
578 		LIST_INSERT_HEAD(&unp_dhead, unp, unp_link);
579 		break;
580 
581 	case SOCK_SEQPACKET:
582 		LIST_INSERT_HEAD(&unp_sphead, unp, unp_link);
583 		break;
584 
585 	default:
586 		panic("uipc_attach");
587 	}
588 
589 	if (locked == false)
590 		UNP_LINK_WUNLOCK();
591 
592 	return (0);
593 }
594 
595 static int
596 uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
597 {
598 	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
599 	struct vattr vattr;
600 	int error, namelen;
601 	struct nameidata nd;
602 	struct unpcb *unp;
603 	struct vnode *vp;
604 	struct mount *mp;
605 	cap_rights_t rights;
606 	char *buf;
607 
608 	if (nam->sa_family != AF_UNIX)
609 		return (EAFNOSUPPORT);
610 
611 	unp = sotounpcb(so);
612 	KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));
613 
614 	if (soun->sun_len > sizeof(struct sockaddr_un))
615 		return (EINVAL);
616 	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
617 	if (namelen <= 0)
618 		return (EINVAL);
619 
620 	/*
621 	 * We don't allow simultaneous bind() calls on a single UNIX domain
622 	 * socket, so flag in-progress operations, and return an error if an
623 	 * operation is already in progress.
624 	 *
625 	 * Historically, we have not allowed a socket to be rebound, so this
626 	 * also returns an error.  Not allowing re-binding simplifies the
627 	 * implementation and avoids a great many possible failure modes.
628 	 */
629 	UNP_PCB_LOCK(unp);
630 	if (unp->unp_vnode != NULL) {
631 		UNP_PCB_UNLOCK(unp);
632 		return (EINVAL);
633 	}
634 	if (unp->unp_flags & UNP_BINDING) {
635 		UNP_PCB_UNLOCK(unp);
636 		return (EALREADY);
637 	}
638 	unp->unp_flags |= UNP_BINDING;
639 	UNP_PCB_UNLOCK(unp);
640 
641 	buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
642 	bcopy(soun->sun_path, buf, namelen);
643 	buf[namelen] = 0;
644 
645 restart:
646 	NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | SAVENAME | NOCACHE,
647 	    UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT));
648 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
649 	error = namei(&nd);
650 	if (error)
651 		goto error;
652 	vp = nd.ni_vp;
653 	if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
654 		NDFREE_PNBUF(&nd);
655 		if (nd.ni_dvp == vp)
656 			vrele(nd.ni_dvp);
657 		else
658 			vput(nd.ni_dvp);
659 		if (vp != NULL) {
660 			vrele(vp);
661 			error = EADDRINUSE;
662 			goto error;
663 		}
664 		error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH);
665 		if (error)
666 			goto error;
667 		goto restart;
668 	}
669 	VATTR_NULL(&vattr);
670 	vattr.va_type = VSOCK;
671 	vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_pd->pd_cmask);
672 #ifdef MAC
673 	error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
674 	    &vattr);
675 #endif
676 	if (error == 0)
677 		error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
678 	NDFREE_PNBUF(&nd);
679 	if (error) {
680 		VOP_VPUT_PAIR(nd.ni_dvp, NULL, true);
681 		vn_finished_write(mp);
682 		if (error == ERELOOKUP)
683 			goto restart;
684 		goto error;
685 	}
686 	vp = nd.ni_vp;
687 	ASSERT_VOP_ELOCKED(vp, "uipc_bind");
688 	soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);
689 
690 	UNP_PCB_LOCK(unp);
691 	VOP_UNP_BIND(vp, unp);
692 	unp->unp_vnode = vp;
693 	unp->unp_addr = soun;
694 	unp->unp_flags &= ~UNP_BINDING;
695 	UNP_PCB_UNLOCK(unp);
696 	vref(vp);
697 	VOP_VPUT_PAIR(nd.ni_dvp, &vp, true);
698 	vn_finished_write(mp);
699 	free(buf, M_TEMP);
700 	return (0);
701 
702 error:
703 	UNP_PCB_LOCK(unp);
704 	unp->unp_flags &= ~UNP_BINDING;
705 	UNP_PCB_UNLOCK(unp);
706 	free(buf, M_TEMP);
707 	return (error);
708 }
709 
710 static int
711 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
712 {
713 
714 	return (uipc_bindat(AT_FDCWD, so, nam, td));
715 }
716 
717 static int
718 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
719 {
720 	int error;
721 
722 	KASSERT(td == curthread, ("uipc_connect: td != curthread"));
723 	error = unp_connect(so, nam, td);
724 	return (error);
725 }
726 
727 static int
728 uipc_connectat(int fd, struct socket *so, struct sockaddr *nam,
729     struct thread *td)
730 {
731 	int error;
732 
733 	KASSERT(td == curthread, ("uipc_connectat: td != curthread"));
734 	error = unp_connectat(fd, so, nam, td, false);
735 	return (error);
736 }
737 
738 static void
739 uipc_close(struct socket *so)
740 {
741 	struct unpcb *unp, *unp2;
742 	struct vnode *vp = NULL;
743 	struct mtx *vplock;
744 
745 	unp = sotounpcb(so);
746 	KASSERT(unp != NULL, ("uipc_close: unp == NULL"));
747 
748 	vplock = NULL;
749 	if ((vp = unp->unp_vnode) != NULL) {
750 		vplock = mtx_pool_find(mtxpool_sleep, vp);
751 		mtx_lock(vplock);
752 	}
753 	UNP_PCB_LOCK(unp);
754 	if (vp && unp->unp_vnode == NULL) {
755 		mtx_unlock(vplock);
756 		vp = NULL;
757 	}
758 	if (vp != NULL) {
759 		VOP_UNP_DETACH(vp);
760 		unp->unp_vnode = NULL;
761 	}
762 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
763 		unp_disconnect(unp, unp2);
764 	else
765 		UNP_PCB_UNLOCK(unp);
766 	if (vp) {
767 		mtx_unlock(vplock);
768 		vrele(vp);
769 	}
770 }
771 
772 static int
773 uipc_connect2(struct socket *so1, struct socket *so2)
774 {
775 	struct unpcb *unp, *unp2;
776 
777 	if (so1->so_type != so2->so_type)
778 		return (EPROTOTYPE);
779 
780 	unp = so1->so_pcb;
781 	KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
782 	unp2 = so2->so_pcb;
783 	KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
784 	unp_pcb_lock_pair(unp, unp2);
785 	unp_connect2(so1, so2, PRU_CONNECT2);
786 	unp_pcb_unlock_pair(unp, unp2);
787 
788 	return (0);
789 }
790 
791 static void
792 uipc_detach(struct socket *so)
793 {
794 	struct unpcb *unp, *unp2;
795 	struct mtx *vplock;
796 	struct vnode *vp;
797 	int local_unp_rights;
798 
799 	unp = sotounpcb(so);
800 	KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));
801 
802 	vp = NULL;
803 	vplock = NULL;
804 
805 	UNP_LINK_WLOCK();
806 	LIST_REMOVE(unp, unp_link);
807 	if (unp->unp_gcflag & UNPGC_DEAD)
808 		LIST_REMOVE(unp, unp_dead);
809 	unp->unp_gencnt = ++unp_gencnt;
810 	--unp_count;
811 	UNP_LINK_WUNLOCK();
812 
813 	UNP_PCB_UNLOCK_ASSERT(unp);
814  restart:
815 	if ((vp = unp->unp_vnode) != NULL) {
816 		vplock = mtx_pool_find(mtxpool_sleep, vp);
817 		mtx_lock(vplock);
818 	}
819 	UNP_PCB_LOCK(unp);
820 	if (unp->unp_vnode != vp && unp->unp_vnode != NULL) {
821 		if (vplock)
822 			mtx_unlock(vplock);
823 		UNP_PCB_UNLOCK(unp);
824 		goto restart;
825 	}
826 	if ((vp = unp->unp_vnode) != NULL) {
827 		VOP_UNP_DETACH(vp);
828 		unp->unp_vnode = NULL;
829 	}
830 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
831 		unp_disconnect(unp, unp2);
832 	else
833 		UNP_PCB_UNLOCK(unp);
834 
835 	UNP_REF_LIST_LOCK();
836 	while (!LIST_EMPTY(&unp->unp_refs)) {
837 		struct unpcb *ref = LIST_FIRST(&unp->unp_refs);
838 
839 		unp_pcb_hold(ref);
840 		UNP_REF_LIST_UNLOCK();
841 
842 		MPASS(ref != unp);
843 		UNP_PCB_UNLOCK_ASSERT(ref);
844 		unp_drop(ref);
845 		UNP_REF_LIST_LOCK();
846 	}
847 	UNP_REF_LIST_UNLOCK();
848 
849 	UNP_PCB_LOCK(unp);
850 	local_unp_rights = unp_rights;
851 	unp->unp_socket->so_pcb = NULL;
852 	unp->unp_socket = NULL;
853 	free(unp->unp_addr, M_SONAME);
854 	unp->unp_addr = NULL;
855 	if (!unp_pcb_rele(unp))
856 		UNP_PCB_UNLOCK(unp);
857 	if (vp) {
858 		mtx_unlock(vplock);
859 		vrele(vp);
860 	}
861 	if (local_unp_rights)
862 		taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1);
863 
864 	switch (so->so_type) {
865 	case SOCK_DGRAM:
866 		/*
867 		 * Everything should have been unlinked/freed by unp_dispose()
868 		 * and/or unp_disconnect().
869 		 */
870 		MPASS(so->so_rcv.uxdg_peeked == NULL);
871 		MPASS(STAILQ_EMPTY(&so->so_rcv.uxdg_mb));
872 		MPASS(TAILQ_EMPTY(&so->so_rcv.uxdg_conns));
873 		MPASS(STAILQ_EMPTY(&so->so_snd.uxdg_mb));
874 	}
875 }
876 
877 static int
878 uipc_disconnect(struct socket *so)
879 {
880 	struct unpcb *unp, *unp2;
881 
882 	unp = sotounpcb(so);
883 	KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));
884 
885 	UNP_PCB_LOCK(unp);
886 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
887 		unp_disconnect(unp, unp2);
888 	else
889 		UNP_PCB_UNLOCK(unp);
890 	return (0);
891 }
892 
893 static int
894 uipc_listen(struct socket *so, int backlog, struct thread *td)
895 {
896 	struct unpcb *unp;
897 	int error;
898 
899 	MPASS(so->so_type != SOCK_DGRAM);
900 
901 	/*
902 	 * Synchronize with concurrent connection attempts.
903 	 */
904 	error = 0;
905 	unp = sotounpcb(so);
906 	UNP_PCB_LOCK(unp);
907 	if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0)
908 		error = EINVAL;
909 	else if (unp->unp_vnode == NULL)
910 		error = EDESTADDRREQ;
911 	if (error != 0) {
912 		UNP_PCB_UNLOCK(unp);
913 		return (error);
914 	}
915 
916 	SOCK_LOCK(so);
917 	error = solisten_proto_check(so);
918 	if (error == 0) {
919 		cru2xt(td, &unp->unp_peercred);
920 		solisten_proto(so, backlog);
921 	}
922 	SOCK_UNLOCK(so);
923 	UNP_PCB_UNLOCK(unp);
924 	return (error);
925 }
926 
927 static int
928 uipc_peeraddr(struct socket *so, struct sockaddr **nam)
929 {
930 	struct unpcb *unp, *unp2;
931 	const struct sockaddr *sa;
932 
933 	unp = sotounpcb(so);
934 	KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));
935 
936 	*nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
937 	UNP_LINK_RLOCK();
938 	/*
939 	 * XXX: It seems that this test always fails even when the connection
940 	 * is established.  So, the else clause below is a workaround to
941 	 * return a PF_LOCAL sockaddr.
942 	 */
943 	unp2 = unp->unp_conn;
944 	if (unp2 != NULL) {
945 		UNP_PCB_LOCK(unp2);
946 		if (unp2->unp_addr != NULL)
947 			sa = (struct sockaddr *) unp2->unp_addr;
948 		else
949 			sa = &sun_noname;
950 		bcopy(sa, *nam, sa->sa_len);
951 		UNP_PCB_UNLOCK(unp2);
952 	} else {
953 		sa = &sun_noname;
954 		bcopy(sa, *nam, sa->sa_len);
955 	}
956 	UNP_LINK_RUNLOCK();
957 	return (0);
958 }
959 
960 static int
961 uipc_rcvd(struct socket *so, int flags)
962 {
963 	struct unpcb *unp, *unp2;
964 	struct socket *so2;
965 	u_int mbcnt, sbcc;
966 
967 	unp = sotounpcb(so);
968 	KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
969 	KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
970 	    ("%s: socktype %d", __func__, so->so_type));
971 
972 	/*
973 	 * Adjust backpressure on the sender and wake up any waiting writers.
974 	 *
975 	 * The unp lock is acquired to maintain the validity of the unp_conn
976 	 * pointer; no lock on unp2 is required as unp2->unp_socket will be
977 	 * static as long as we don't permit unp2 to disconnect from unp,
978 	 * which is prevented by the lock on unp.  We cache values from
979 	 * so_rcv to avoid holding the so_rcv lock over the entire
980 	 * transaction on the remote so_snd.
981 	 */
982 	SOCKBUF_LOCK(&so->so_rcv);
983 	mbcnt = so->so_rcv.sb_mbcnt;
984 	sbcc = sbavail(&so->so_rcv);
985 	SOCKBUF_UNLOCK(&so->so_rcv);
986 	/*
987 	 * There is a benign race condition at this point.  If we're planning to
988 	 * clear SB_STOP, but uipc_send is called on the connected socket at
989 	 * this instant, it might add data to the sockbuf and set SB_STOP.  Then
990 	 * we would erroneously clear SB_STOP below, even though the sockbuf is
991 	 * full.  The race is benign because the only ill effect is to allow the
992 	 * sockbuf to exceed its size limit, and the size limits are not
993 	 * strictly guaranteed anyway.
994 	 */
995 	UNP_PCB_LOCK(unp);
996 	unp2 = unp->unp_conn;
997 	if (unp2 == NULL) {
998 		UNP_PCB_UNLOCK(unp);
999 		return (0);
1000 	}
1001 	so2 = unp2->unp_socket;
1002 	SOCKBUF_LOCK(&so2->so_snd);
1003 	if (sbcc < so2->so_snd.sb_hiwat && mbcnt < so2->so_snd.sb_mbmax)
1004 		so2->so_snd.sb_flags &= ~SB_STOP;
1005 	sowwakeup_locked(so2);
1006 	UNP_PCB_UNLOCK(unp);
1007 	return (0);
1008 }
1009 
1010 static int
1011 uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
1012     struct mbuf *control, struct thread *td)
1013 {
1014 	struct unpcb *unp, *unp2;
1015 	struct socket *so2;
1016 	u_int mbcnt, sbcc;
1017 	int error;
1018 
1019 	unp = sotounpcb(so);
1020 	KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
1021 	KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
1022 	    ("%s: socktype %d", __func__, so->so_type));
1023 
1024 	error = 0;
1025 	if (flags & PRUS_OOB) {
1026 		error = EOPNOTSUPP;
1027 		goto release;
1028 	}
1029 	if (control != NULL &&
1030 	    (error = unp_internalize(&control, td, NULL, NULL, NULL)))
1031 		goto release;
1032 
1033 	unp2 = NULL;
1034 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1035 		if (nam != NULL) {
1036 			if ((error = unp_connect(so, nam, td)) != 0)
1037 				goto out;
1038 		} else {
1039 			error = ENOTCONN;
1040 			goto out;
1041 		}
1042 	}
1043 
1044 	UNP_PCB_LOCK(unp);
1045 	if ((unp2 = unp_pcb_lock_peer(unp)) == NULL) {
1046 		UNP_PCB_UNLOCK(unp);
1047 		error = ENOTCONN;
1048 		goto out;
1049 	} else if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1050 		unp_pcb_unlock_pair(unp, unp2);
1051 		error = EPIPE;
1052 		goto out;
1053 	}
1054 	UNP_PCB_UNLOCK(unp);
1055 	if ((so2 = unp2->unp_socket) == NULL) {
1056 		UNP_PCB_UNLOCK(unp2);
1057 		error = ENOTCONN;
1058 		goto out;
1059 	}
1060 	SOCKBUF_LOCK(&so2->so_rcv);
1061 	if (unp2->unp_flags & UNP_WANTCRED_MASK) {
1062 		/*
1063 		 * Credentials are passed only once on SOCK_STREAM and
1064 		 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or
1065 		 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS).
1066 		 */
1067 		control = unp_addsockcred(td, control, unp2->unp_flags, NULL,
1068 		    NULL, NULL);
1069 		unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT;
1070 	}
1071 
1072 	/*
1073 	 * Send to paired receive port and wake up readers.  Don't
1074 	 * check for space available in the receive buffer if we're
1075 	 * attaching ancillary data; Unix domain sockets only check
1076 	 * for space in the sending sockbuf, and that check is
1077 	 * performed one level up the stack.  At that level we cannot
1078 	 * precisely account for the amount of buffer space used
1079 	 * (e.g., because control messages are not yet internalized).
1080 	 */
1081 	switch (so->so_type) {
1082 	case SOCK_STREAM:
1083 		if (control != NULL) {
1084 			sbappendcontrol_locked(&so2->so_rcv, m,
1085 			    control, flags);
1086 			control = NULL;
1087 		} else
1088 			sbappend_locked(&so2->so_rcv, m, flags);
1089 		break;
1090 
1091 	case SOCK_SEQPACKET:
1092 		if (sbappendaddr_nospacecheck_locked(&so2->so_rcv,
1093 		    &sun_noname, m, control))
1094 			control = NULL;
1095 		break;
1096 	}
1097 
1098 	mbcnt = so2->so_rcv.sb_mbcnt;
1099 	sbcc = sbavail(&so2->so_rcv);
1100 	if (sbcc)
1101 		sorwakeup_locked(so2);
1102 	else
1103 		SOCKBUF_UNLOCK(&so2->so_rcv);
1104 
1105 	/*
1106 	 * The PCB lock on unp2 protects the SB_STOP flag.  Without it,
1107 	 * it would be possible for uipc_rcvd to be called at this
1108 	 * point, drain the receiving sockbuf, clear SB_STOP, and then
1109 	 * we would set SB_STOP below.  That could lead to an empty
1110 	 * sockbuf having SB_STOP set.
1111 	 */
1112 	SOCKBUF_LOCK(&so->so_snd);
1113 	if (sbcc >= so->so_snd.sb_hiwat || mbcnt >= so->so_snd.sb_mbmax)
1114 		so->so_snd.sb_flags |= SB_STOP;
1115 	SOCKBUF_UNLOCK(&so->so_snd);
1116 	UNP_PCB_UNLOCK(unp2);
1117 	m = NULL;
1118 out:
1119 	/*
1120 	 * PRUS_EOF is equivalent to pru_send followed by pru_shutdown.
1121 	 */
1122 	if (flags & PRUS_EOF) {
1123 		UNP_PCB_LOCK(unp);
1124 		socantsendmore(so);
1125 		unp_shutdown(unp);
1126 		UNP_PCB_UNLOCK(unp);
1127 	}
1128 	if (control != NULL && error != 0)
1129 		unp_scan(control, unp_freerights);
1130 
1131 release:
1132 	if (control != NULL)
1133 		m_freem(control);
1134 	/*
1135 	 * In case of PRUS_NOTREADY, uipc_ready() is responsible
1136 	 * for freeing memory.
1137 	 */
1138 	if (m != NULL && (flags & PRUS_NOTREADY) == 0)
1139 		m_freem(m);
1140 	return (error);
1141 }
1142 
1143 /* PF_UNIX/SOCK_DGRAM version of sbspace() */
1144 static inline bool
1145 uipc_dgram_sbspace(struct sockbuf *sb, u_int cc, u_int mbcnt)
1146 {
1147 	u_int bleft, mleft;
1148 
1149 	MPASS(sb->sb_hiwat >= sb->uxdg_cc);
1150 	MPASS(sb->sb_mbmax >= sb->uxdg_mbcnt);
1151 
1152 	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE))
1153 		return (false);
1154 
1155 	bleft = sb->sb_hiwat - sb->uxdg_cc;
1156 	mleft = sb->sb_mbmax - sb->uxdg_mbcnt;
1157 
1158 	return (bleft >= cc && mleft >= mbcnt);
1159 }
1160 
1161 /*
1162  * PF_UNIX/SOCK_DGRAM send
1163  *
1164  * Allocate a record consisting of 3 mbufs in the sequence of
1165  * from -> control -> data and append it to the socket buffer.
1166  *
1167  * The first mbuf carries the sender's name and is a pkthdr that stores the
1168  * overall length of the datagram, its memory consumption, and control length.
1169  */
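
/*
 * Sketch of one queued datagram record; the ctllen macro below aliases
 * spare pkthdr storage (PH_loc) of the MT_SONAME mbuf to cache the
 * record's control length:
 *
 *	[MT_SONAME pkthdr: len, memlen, ctllen] -> [MT_CONTROL ...] -> [MT_DATA ...]
 */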
1170 #define	ctllen	PH_loc.thirtytwo[1]
1171 _Static_assert(offsetof(struct pkthdr, memlen) + sizeof(u_int) <=
1172     offsetof(struct pkthdr, ctllen), "unix/dgram can not store ctllen");
1173 static int
1174 uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
1175     struct mbuf *m, struct mbuf *c, int flags, struct thread *td)
1176 {
1177 	struct unpcb *unp, *unp2;
1178 	const struct sockaddr *from;
1179 	struct socket *so2;
1180 	struct sockbuf *sb;
1181 	struct mbuf *f, *clast;
1182 	u_int cc, ctl, mbcnt;
1183 	u_int dcc __diagused, dctl __diagused, dmbcnt __diagused;
1184 	int error;
1185 
1186 	MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL));
1187 
1188 	error = 0;
1189 	f = NULL;
1190 	ctl = 0;
1191 
1192 	if (__predict_false(flags & MSG_OOB)) {
1193 		error = EOPNOTSUPP;
1194 		goto out;
1195 	}
1196 	if (m == NULL) {
1197 		if (__predict_false(uio->uio_resid > unpdg_maxdgram)) {
1198 			error = EMSGSIZE;
1199 			goto out;
1200 		}
1201 		m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR);
1202 		if (__predict_false(m == NULL)) {
1203 			error = EFAULT;
1204 			goto out;
1205 		}
1206 		f = m_gethdr(M_WAITOK, MT_SONAME);
1207 		cc = m->m_pkthdr.len;
1208 		mbcnt = MSIZE + m->m_pkthdr.memlen;
1209 		if (c != NULL &&
1210 		    (error = unp_internalize(&c, td, &clast, &ctl, &mbcnt)))
1211 			goto out;
1212 	} else {
1213 		/* pru_sosend() with an mbuf usually comes from a kernel thread. */
1214 
1215 		M_ASSERTPKTHDR(m);
1216 		if (__predict_false(c != NULL))
1217 			panic("%s: control from a kernel thread", __func__);
1218 
1219 		if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) {
1220 			error = EMSGSIZE;
1221 			goto out;
1222 		}
1223 		if ((f = m_gethdr(M_NOWAIT, MT_SONAME)) == NULL) {
1224 			error = ENOBUFS;
1225 			goto out;
1226 		}
1227 		/* Condition the foreign mbuf to our standards. */
1228 		m_clrprotoflags(m);
1229 		m_tag_delete_chain(m, NULL);
1230 		m->m_pkthdr.rcvif = NULL;
1231 		m->m_pkthdr.flowid = 0;
1232 		m->m_pkthdr.csum_flags = 0;
1233 		m->m_pkthdr.fibnum = 0;
1234 		m->m_pkthdr.rsstype = 0;
1235 
1236 		cc = m->m_pkthdr.len;
1237 		mbcnt = MSIZE;
1238 		for (struct mbuf *mb = m; mb != NULL; mb = mb->m_next) {
1239 			mbcnt += MSIZE;
1240 			if (mb->m_flags & M_EXT)
1241 				mbcnt += mb->m_ext.ext_size;
1242 		}
1243 	}
1244 
1245 	unp = sotounpcb(so);
1246 	MPASS(unp);
1247 
1248 	/*
1249 	 * XXXGL: it would be nice to fully remove so_snd from the equation
1250 	 * and avoid this lock, which is not only extraneous but also gets
1251 	 * released, thus still leaving a window for a race.  We can easily
1252 	 * handle the SBS_CANTSENDMORE/SS_ISCONNECTED complement in the unpcb,
1253 	 * but it is more difficult to invent something to handle so_error.
1254 	 */
1255 	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
1256 	if (error)
1257 		goto out2;
1258 	SOCK_SENDBUF_LOCK(so);
1259 	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1260 		SOCK_SENDBUF_UNLOCK(so);
1261 		error = EPIPE;
1262 		goto out3;
1263 	}
1264 	if (so->so_error != 0) {
1265 		error = so->so_error;
1266 		so->so_error = 0;
1267 		SOCK_SENDBUF_UNLOCK(so);
1268 		goto out3;
1269 	}
1270 	if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) {
1271 		SOCK_SENDBUF_UNLOCK(so);
1272 		error = EDESTADDRREQ;
1273 		goto out3;
1274 	}
1275 	SOCK_SENDBUF_UNLOCK(so);
1276 
1277 	if (addr != NULL) {
1278 		if ((error = unp_connectat(AT_FDCWD, so, addr, td, true)))
1279 			goto out3;
1280 		UNP_PCB_LOCK_ASSERT(unp);
1281 		unp2 = unp->unp_conn;
1282 		UNP_PCB_LOCK_ASSERT(unp2);
1283 	} else {
1284 		UNP_PCB_LOCK(unp);
1285 		unp2 = unp_pcb_lock_peer(unp);
1286 		if (unp2 == NULL) {
1287 			UNP_PCB_UNLOCK(unp);
1288 			error = ENOTCONN;
1289 			goto out3;
1290 		}
1291 	}
1292 
1293 	if (unp2->unp_flags & UNP_WANTCRED_MASK)
1294 		c = unp_addsockcred(td, c, unp2->unp_flags, &clast, &ctl,
1295 		    &mbcnt);
1296 	if (unp->unp_addr != NULL)
1297 		from = (struct sockaddr *)unp->unp_addr;
1298 	else
1299 		from = &sun_noname;
1300 	f->m_len = from->sa_len;
1301 	MPASS(from->sa_len <= MLEN);
1302 	bcopy(from, mtod(f, void *), from->sa_len);
1303 	ctl += f->m_len;
1304 
1305 	/*
1306 	 * Concatenate mbufs: from -> control -> data.
1307 	 * Save overall cc and mbcnt in "from" mbuf.
1308 	 */
1309 	if (c != NULL) {
1310 #ifdef INVARIANTS
1311 		struct mbuf *mc;
1312 
1313 		for (mc = c; mc->m_next != NULL; mc = mc->m_next);
1314 		MPASS(mc == clast);
1315 #endif
1316 		f->m_next = c;
1317 		clast->m_next = m;
1318 		c = NULL;
1319 	} else
1320 		f->m_next = m;
1321 	m = NULL;
1322 #ifdef INVARIANTS
1323 	dcc = dctl = dmbcnt = 0;
1324 	for (struct mbuf *mb = f; mb != NULL; mb = mb->m_next) {
1325 		if (mb->m_type == MT_DATA)
1326 			dcc += mb->m_len;
1327 		else
1328 			dctl += mb->m_len;
1329 		dmbcnt += MSIZE;
1330 		if (mb->m_flags & M_EXT)
1331 			dmbcnt += mb->m_ext.ext_size;
1332 	}
1333 	MPASS(dcc == cc);
1334 	MPASS(dctl == ctl);
1335 	MPASS(dmbcnt == mbcnt);
1336 #endif
1337 	f->m_pkthdr.len = cc + ctl;
1338 	f->m_pkthdr.memlen = mbcnt;
1339 	f->m_pkthdr.ctllen = ctl;
1340 
1341 	/*
1342 	 * Destination socket buffer selection.
1343 	 *
1344 	 * Unconnected sends, when !(so->so_state & SS_ISCONNECTED) and the
1345 	 * destination address is supplied, create a temporary connection for
1346 	 * the run time of the function (see call to unp_connectat() above and
1347 	 * to unp_disconnect() below).  We distinguish the two cases by the
1348 	 * condition (addr != NULL).  We intentionally avoid adding a 'bool
1349 	 * connected' flag for that condition since, through the run time of
1350 	 * this code, we are always connected.  For such "unconnected" sends,
1351 	 * the destination buffer is the receive buffer of destination socket so2.
1352 	 *
1353 	 * For connected sends, data lands on the send buffer of the sender's
1354 	 * socket "so".  Then, if we just added the very first datagram
1355 	 * on this send buffer, we need to add the send buffer on to the
1356 	 * receiving socket's buffer list.  We put ourselves on top of the
1357 	 * list.  Such logic gives infrequent senders priority over frequent
1358 	 * senders.
1359 	 *
1360 	 * A note on byte count management: as long as the event methods
1361 	 * kevent(2) and select(2) are not protocol specific (yet), we need to
1362 	 * maintain meaningful values on the receive buffer.  So, the receive
1363 	 * buffer accumulates the counters of all connected send buffers and
1364 	 * may have sb_ccc > sb_hiwat or sb_mbcnt > sb_mbmax.
1365 	 */
1366 	so2 = unp2->unp_socket;
1367 	sb = (addr == NULL) ? &so->so_snd : &so2->so_rcv;
1368 	SOCK_RECVBUF_LOCK(so2);
1369 	if (uipc_dgram_sbspace(sb, cc + ctl, mbcnt)) {
1370 		if (addr == NULL && STAILQ_EMPTY(&sb->uxdg_mb))
1371 			TAILQ_INSERT_HEAD(&so2->so_rcv.uxdg_conns, &so->so_snd,
1372 			    uxdg_clist);
1373 		STAILQ_INSERT_TAIL(&sb->uxdg_mb, f, m_stailqpkt);
1374 		sb->uxdg_cc += cc + ctl;
1375 		sb->uxdg_ctl += ctl;
1376 		sb->uxdg_mbcnt += mbcnt;
1377 		so2->so_rcv.sb_acc += cc + ctl;
1378 		so2->so_rcv.sb_ccc += cc + ctl;
1379 		so2->so_rcv.sb_ctl += ctl;
1380 		so2->so_rcv.sb_mbcnt += mbcnt;
1381 		sorwakeup_locked(so2);
1382 		f = NULL;
1383 	} else {
1384 		soroverflow_locked(so2);
1385 		error = (so->so_state & SS_NBIO) ? EAGAIN : ENOBUFS;
1386 	}
1387 
1388 	if (addr != NULL)
1389 		unp_disconnect(unp, unp2);
1390 	else
1391 		unp_pcb_unlock_pair(unp, unp2);
1392 
1393 	td->td_ru.ru_msgsnd++;
1394 
1395 out3:
1396 	SOCK_IO_SEND_UNLOCK(so);
1397 out2:
1398 	if (c)
1399 		unp_scan(c, unp_freerights);
1400 out:
1401 	if (f)
1402 		m_freem(f);
1403 	if (c)
1404 		m_freem(c);
1405 	if (m)
1406 		m_freem(m);
1407 
1408 	return (error);
1409 }
1410 
1411 /*
1412  * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK.
1413  * The mbuf has already been unlinked from the uxdg_mb of a socket buffer
1414  * and needs to be linked onto the uxdg_peeked of the receive socket buffer.
1415  */
1416 static int
1417 uipc_peek_dgram(struct socket *so, struct mbuf *m, struct sockaddr **psa,
1418     struct uio *uio, struct mbuf **controlp, int *flagsp)
1419 {
1420 	ssize_t len;
1421 	int error;
1422 
1423 	so->so_rcv.uxdg_peeked = m;
1424 	so->so_rcv.uxdg_cc += m->m_pkthdr.len;
1425 	so->so_rcv.uxdg_ctl += m->m_pkthdr.ctllen;
1426 	so->so_rcv.uxdg_mbcnt += m->m_pkthdr.memlen;
1427 	SOCK_RECVBUF_UNLOCK(so);
1428 
1429 	KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));
1430 	if (psa != NULL)
1431 		*psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);
1432 
1433 	m = m->m_next;
1434 	KASSERT(m, ("%s: no data or control after soname", __func__));
1435 
1436 	/*
1437 	 * With MSG_PEEK the control messages aren't externalized, just copied.
1438 	 */
1439 	while (m != NULL && m->m_type == MT_CONTROL) {
1440 		if (controlp != NULL) {
1441 			*controlp = m_copym(m, 0, m->m_len, M_WAITOK);
1442 			controlp = &(*controlp)->m_next;
1443 		}
1444 		m = m->m_next;
1445 	}
1446 	KASSERT(m == NULL || m->m_type == MT_DATA,
1447 	    ("%s: not MT_DATA mbuf %p", __func__, m));
1448 	while (m != NULL && uio->uio_resid > 0) {
1449 		len = uio->uio_resid;
1450 		if (len > m->m_len)
1451 			len = m->m_len;
1452 		error = uiomove(mtod(m, char *), (int)len, uio);
1453 		if (error) {
1454 			SOCK_IO_RECV_UNLOCK(so);
1455 			return (error);
1456 		}
1457 		if (len == m->m_len)
1458 			m = m->m_next;
1459 	}
1460 	SOCK_IO_RECV_UNLOCK(so);
1461 
1462 	if (m != NULL && flagsp != NULL)
1463 		*flagsp |= MSG_TRUNC;
1464 
1465 	return (0);
1466 }
1467 
1468 /*
1469  * PF_UNIX/SOCK_DGRAM receive
1470  */
1471 static int
1472 uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
1473     struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1474 {
1475 	struct sockbuf *sb = NULL;
1476 	struct mbuf *m;
1477 	int flags, error;
1478 	ssize_t len;
1479 	bool nonblock;
1480 
1481 	MPASS(mp0 == NULL);
1482 
1483 	if (psa != NULL)
1484 		*psa = NULL;
1485 	if (controlp != NULL)
1486 		*controlp = NULL;
1487 
1488 	flags = flagsp != NULL ? *flagsp : 0;
1489 	nonblock = (so->so_state & SS_NBIO) ||
1490 	    (flags & (MSG_DONTWAIT | MSG_NBIO));
1491 
1492 	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
1493 	if (__predict_false(error))
1494 		return (error);
1495 
1496 	/*
1497 	 * Block in a loop while waiting for a datagram.  Prioritize connected
1498 	 * peers over unconnected sends.  Set sb to the selected socket buffer
1499 	 * containing an mbuf on exit from the wait loop.  A datagram that
1500 	 * had already been peeked at has top priority.
1501 	 */
1502 	SOCK_RECVBUF_LOCK(so);
1503 	while ((m = so->so_rcv.uxdg_peeked) == NULL &&
1504 	    (sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) == NULL &&
1505 	    (m = STAILQ_FIRST(&so->so_rcv.uxdg_mb)) == NULL) {
1506 		if (so->so_error) {
1507 			error = so->so_error;
1508 			so->so_error = 0;
1509 			SOCK_RECVBUF_UNLOCK(so);
1510 			SOCK_IO_RECV_UNLOCK(so);
1511 			return (error);
1512 		}
1513 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
1514 		    uio->uio_resid == 0) {
1515 			SOCK_RECVBUF_UNLOCK(so);
1516 			SOCK_IO_RECV_UNLOCK(so);
1517 			return (0);
1518 		}
1519 		if (nonblock) {
1520 			SOCK_RECVBUF_UNLOCK(so);
1521 			SOCK_IO_RECV_UNLOCK(so);
1522 			return (EWOULDBLOCK);
1523 		}
1524 		error = sbwait(so, SO_RCV);
1525 		if (error) {
1526 			SOCK_RECVBUF_UNLOCK(so);
1527 			SOCK_IO_RECV_UNLOCK(so);
1528 			return (error);
1529 		}
1530 	}
1531 
1532 	if (sb == NULL)
1533 		sb = &so->so_rcv;
1534 	else if (m == NULL)
1535 		m = STAILQ_FIRST(&sb->uxdg_mb);
1536 	else
1537 		MPASS(m == so->so_rcv.uxdg_peeked);
1538 
1539 	MPASS(sb->uxdg_cc > 0);
1540 	M_ASSERTPKTHDR(m);
1541 	KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));
1542 
1543 	if (uio->uio_td)
1544 		uio->uio_td->td_ru.ru_msgrcv++;
1545 
1546 	if (__predict_true(m != so->so_rcv.uxdg_peeked)) {
1547 		STAILQ_REMOVE_HEAD(&sb->uxdg_mb, m_stailqpkt);
1548 		if (STAILQ_EMPTY(&sb->uxdg_mb) && sb != &so->so_rcv)
1549 			TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist);
1550 	} else
1551 		so->so_rcv.uxdg_peeked = NULL;
1552 
1553 	sb->uxdg_cc -= m->m_pkthdr.len;
1554 	sb->uxdg_ctl -= m->m_pkthdr.ctllen;
1555 	sb->uxdg_mbcnt -= m->m_pkthdr.memlen;
1556 
1557 	if (__predict_false(flags & MSG_PEEK))
1558 		return (uipc_peek_dgram(so, m, psa, uio, controlp, flagsp));
1559 
1560 	so->so_rcv.sb_acc -= m->m_pkthdr.len;
1561 	so->so_rcv.sb_ccc -= m->m_pkthdr.len;
1562 	so->so_rcv.sb_ctl -= m->m_pkthdr.ctllen;
1563 	so->so_rcv.sb_mbcnt -= m->m_pkthdr.memlen;
1564 	SOCK_RECVBUF_UNLOCK(so);
1565 
1566 	if (psa != NULL)
1567 		*psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);
1568 	m = m_free(m);
1569 	KASSERT(m, ("%s: no data or control after soname", __func__));
1570 
1571 	/*
1572 	 * The packet to copyout() is now in 'm' and is disconnected from the
1573 	 * queue.
1574 	 *
1575 	 * Process one or more MT_CONTROL mbufs present before any data mbufs
1576 	 * in the first mbuf chain on the socket buffer.  We call into
1577 	 * unp_externalize() to perform externalization (or freeing if
1578 	 * controlp == NULL). In some cases there can be only MT_CONTROL mbufs
1579 	 * without MT_DATA mbufs.
1580 	 */
1581 	while (m != NULL && m->m_type == MT_CONTROL) {
1582 		struct mbuf *cm;
1583 
1584 		/* XXXGL: unp_externalize() is also the dom_externalize() KBI
1585 		 * and it frees the whole chain, so we must disconnect the mbuf.
1586 		 */
1587 		cm = m; m = m->m_next; cm->m_next = NULL;
1588 		error = unp_externalize(cm, controlp, flags);
1589 		if (error != 0) {
1590 			SOCK_IO_RECV_UNLOCK(so);
1591 			unp_scan(m, unp_freerights);
1592 			m_freem(m);
1593 			return (error);
1594 		}
1595 		if (controlp != NULL) {
1596 			while (*controlp != NULL)
1597 				controlp = &(*controlp)->m_next;
1598 		}
1599 	}
1600 	KASSERT(m == NULL || m->m_type == MT_DATA,
1601 	    ("%s: not MT_DATA mbuf %p", __func__, m));
1602 	while (m != NULL && uio->uio_resid > 0) {
1603 		len = uio->uio_resid;
1604 		if (len > m->m_len)
1605 			len = m->m_len;
1606 		error = uiomove(mtod(m, char *), (int)len, uio);
1607 		if (error) {
1608 			SOCK_IO_RECV_UNLOCK(so);
1609 			m_freem(m);
1610 			return (error);
1611 		}
1612 		if (len == m->m_len)
1613 			m = m_free(m);
1614 		else {
1615 			m->m_data += len;
1616 			m->m_len -= len;
1617 		}
1618 	}
1619 	SOCK_IO_RECV_UNLOCK(so);
1620 
1621 	if (m != NULL) {
1622 		flags |= MSG_TRUNC;
1623 		m_freem(m);
1624 	}
1625 	if (flagsp != NULL)
1626 		*flagsp |= flags;
1627 
1628 	return (0);
1629 }
1630 
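/*
 * Scan the receive buffer of the given socket for the not-ready mbuf 'm'
 * and, if it is found, mark 'count' mbufs starting at 'm' as ready with
 * sbready(), storing the result in *errorp.  Returns false when the socket
 * is listening or 'm' is not present in its buffer.
 */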
1631 static bool
1632 uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp)
1633 {
1634 	struct mbuf *mb, *n;
1635 	struct sockbuf *sb;
1636 
1637 	SOCK_LOCK(so);
1638 	if (SOLISTENING(so)) {
1639 		SOCK_UNLOCK(so);
1640 		return (false);
1641 	}
1642 	mb = NULL;
1643 	sb = &so->so_rcv;
1644 	SOCKBUF_LOCK(sb);
1645 	if (sb->sb_fnrdy != NULL) {
1646 		for (mb = sb->sb_mb, n = mb->m_nextpkt; mb != NULL;) {
1647 			if (mb == m) {
1648 				*errorp = sbready(sb, m, count);
1649 				break;
1650 			}
1651 			mb = mb->m_next;
1652 			if (mb == NULL) {
1653 				mb = n;
1654 				if (mb != NULL)
1655 					n = mb->m_nextpkt;
1656 			}
1657 		}
1658 	}
1659 	SOCKBUF_UNLOCK(sb);
1660 	SOCK_UNLOCK(so);
1661 	return (mb != NULL);
1662 }
1663 
1664 static int
1665 uipc_ready(struct socket *so, struct mbuf *m, int count)
1666 {
1667 	struct unpcb *unp, *unp2;
1668 	struct socket *so2;
1669 	int error, i;
1670 
1671 	unp = sotounpcb(so);
1672 
1673 	KASSERT(so->so_type == SOCK_STREAM,
1674 	    ("%s: unexpected socket type for %p", __func__, so));
1675 
1676 	UNP_PCB_LOCK(unp);
1677 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
1678 		UNP_PCB_UNLOCK(unp);
1679 		so2 = unp2->unp_socket;
1680 		SOCKBUF_LOCK(&so2->so_rcv);
1681 		if ((error = sbready(&so2->so_rcv, m, count)) == 0)
1682 			sorwakeup_locked(so2);
1683 		else
1684 			SOCKBUF_UNLOCK(&so2->so_rcv);
1685 		UNP_PCB_UNLOCK(unp2);
1686 		return (error);
1687 	}
1688 	UNP_PCB_UNLOCK(unp);
1689 
1690 	/*
1691 	 * The receiving socket has been disconnected, but may still be valid.
1692 	 * In this case, the now-ready mbufs are still present in its socket
1693 	 * buffer, so perform an exhaustive search before giving up and freeing
1694 	 * the mbufs.
1695 	 */
1696 	UNP_LINK_RLOCK();
1697 	LIST_FOREACH(unp, &unp_shead, unp_link) {
1698 		if (uipc_ready_scan(unp->unp_socket, m, count, &error))
1699 			break;
1700 	}
1701 	UNP_LINK_RUNLOCK();
1702 
1703 	if (unp == NULL) {
1704 		for (i = 0; i < count; i++)
1705 			m = m_free(m);
1706 		error = ECONNRESET;
1707 	}
1708 	return (error);
1709 }
1710 
1711 static int
1712 uipc_sense(struct socket *so, struct stat *sb)
1713 {
1714 	struct unpcb *unp;
1715 
1716 	unp = sotounpcb(so);
1717 	KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));
1718 
1719 	sb->st_blksize = so->so_snd.sb_hiwat;
1720 	sb->st_dev = NODEV;
1721 	sb->st_ino = unp->unp_ino;
1722 	return (0);
1723 }
1724 
1725 static int
1726 uipc_shutdown(struct socket *so)
1727 {
1728 	struct unpcb *unp;
1729 
1730 	unp = sotounpcb(so);
1731 	KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL"));
1732 
1733 	UNP_PCB_LOCK(unp);
1734 	socantsendmore(so);
1735 	unp_shutdown(unp);
1736 	UNP_PCB_UNLOCK(unp);
1737 	return (0);
1738 }
1739 
1740 static int
1741 uipc_sockaddr(struct socket *so, struct sockaddr **nam)
1742 {
1743 	struct unpcb *unp;
1744 	const struct sockaddr *sa;
1745 
1746 	unp = sotounpcb(so);
1747 	KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL"));
1748 
1749 	*nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1750 	UNP_PCB_LOCK(unp);
1751 	if (unp->unp_addr != NULL)
1752 		sa = (struct sockaddr *) unp->unp_addr;
1753 	else
1754 		sa = &sun_noname;
1755 	bcopy(sa, *nam, sa->sa_len);
1756 	UNP_PCB_UNLOCK(unp);
1757 	return (0);
1758 }
1759 
1760 static struct pr_usrreqs uipc_usrreqs_dgram = {
1761 	.pru_abort = 		uipc_abort,
1762 	.pru_accept =		uipc_accept,
1763 	.pru_attach =		uipc_attach,
1764 	.pru_bind =		uipc_bind,
1765 	.pru_bindat =		uipc_bindat,
1766 	.pru_connect =		uipc_connect,
1767 	.pru_connectat =	uipc_connectat,
1768 	.pru_connect2 =		uipc_connect2,
1769 	.pru_detach =		uipc_detach,
1770 	.pru_disconnect =	uipc_disconnect,
1771 	.pru_peeraddr =		uipc_peeraddr,
1772 	.pru_sosend =		uipc_sosend_dgram,
1773 	.pru_sense =		uipc_sense,
1774 	.pru_shutdown =		uipc_shutdown,
1775 	.pru_sockaddr =		uipc_sockaddr,
1776 	.pru_soreceive =	uipc_soreceive_dgram,
1777 	.pru_close =		uipc_close,
1778 };
1779 
1780 static struct pr_usrreqs uipc_usrreqs_seqpacket = {
1781 	.pru_abort =		uipc_abort,
1782 	.pru_accept =		uipc_accept,
1783 	.pru_attach =		uipc_attach,
1784 	.pru_bind =		uipc_bind,
1785 	.pru_bindat =		uipc_bindat,
1786 	.pru_connect =		uipc_connect,
1787 	.pru_connectat =	uipc_connectat,
1788 	.pru_connect2 =		uipc_connect2,
1789 	.pru_detach =		uipc_detach,
1790 	.pru_disconnect =	uipc_disconnect,
1791 	.pru_listen =		uipc_listen,
1792 	.pru_peeraddr =		uipc_peeraddr,
1793 	.pru_rcvd =		uipc_rcvd,
1794 	.pru_send =		uipc_send,
1795 	.pru_sense =		uipc_sense,
1796 	.pru_shutdown =		uipc_shutdown,
1797 	.pru_sockaddr =		uipc_sockaddr,
1798 	.pru_soreceive =	soreceive_generic,	/* XXX: or...? */
1799 	.pru_close =		uipc_close,
1800 };
1801 
1802 static struct pr_usrreqs uipc_usrreqs_stream = {
1803 	.pru_abort = 		uipc_abort,
1804 	.pru_accept =		uipc_accept,
1805 	.pru_attach =		uipc_attach,
1806 	.pru_bind =		uipc_bind,
1807 	.pru_bindat =		uipc_bindat,
1808 	.pru_connect =		uipc_connect,
1809 	.pru_connectat =	uipc_connectat,
1810 	.pru_connect2 =		uipc_connect2,
1811 	.pru_detach =		uipc_detach,
1812 	.pru_disconnect =	uipc_disconnect,
1813 	.pru_listen =		uipc_listen,
1814 	.pru_peeraddr =		uipc_peeraddr,
1815 	.pru_rcvd =		uipc_rcvd,
1816 	.pru_send =		uipc_send,
1817 	.pru_ready =		uipc_ready,
1818 	.pru_sense =		uipc_sense,
1819 	.pru_shutdown =		uipc_shutdown,
1820 	.pru_sockaddr =		uipc_sockaddr,
1821 	.pru_soreceive =	soreceive_generic,
1822 	.pru_close =		uipc_close,
1823 };
1824 
1825 static int
1826 uipc_ctloutput(struct socket *so, struct sockopt *sopt)
1827 {
1828 	struct unpcb *unp;
1829 	struct xucred xu;
1830 	int error, optval;
1831 
1832 	if (sopt->sopt_level != SOL_LOCAL)
1833 		return (EINVAL);
1834 
1835 	unp = sotounpcb(so);
1836 	KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL"));
1837 	error = 0;
1838 	switch (sopt->sopt_dir) {
1839 	case SOPT_GET:
1840 		switch (sopt->sopt_name) {
1841 		case LOCAL_PEERCRED:
1842 			UNP_PCB_LOCK(unp);
1843 			if (unp->unp_flags & UNP_HAVEPC)
1844 				xu = unp->unp_peercred;
1845 			else {
1846 				if (so->so_type == SOCK_STREAM)
1847 					error = ENOTCONN;
1848 				else
1849 					error = EINVAL;
1850 			}
1851 			UNP_PCB_UNLOCK(unp);
1852 			if (error == 0)
1853 				error = sooptcopyout(sopt, &xu, sizeof(xu));
1854 			break;
1855 
1856 		case LOCAL_CREDS:
1857 			/* Unlocked read. */
1858 			optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0;
1859 			error = sooptcopyout(sopt, &optval, sizeof(optval));
1860 			break;
1861 
1862 		case LOCAL_CREDS_PERSISTENT:
1863 			/* Unlocked read. */
1864 			optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 1 : 0;
1865 			error = sooptcopyout(sopt, &optval, sizeof(optval));
1866 			break;
1867 
1868 		case LOCAL_CONNWAIT:
1869 			/* Unlocked read. */
1870 			optval = unp->unp_flags & UNP_CONNWAIT ? 1 : 0;
1871 			error = sooptcopyout(sopt, &optval, sizeof(optval));
1872 			break;
1873 
1874 		default:
1875 			error = EOPNOTSUPP;
1876 			break;
1877 		}
1878 		break;
1879 
1880 	case SOPT_SET:
1881 		switch (sopt->sopt_name) {
1882 		case LOCAL_CREDS:
1883 		case LOCAL_CREDS_PERSISTENT:
1884 		case LOCAL_CONNWAIT:
1885 			error = sooptcopyin(sopt, &optval, sizeof(optval),
1886 					    sizeof(optval));
1887 			if (error)
1888 				break;
1889 
1890 #define	OPTSET(bit, exclusive) do {					\
1891 	UNP_PCB_LOCK(unp);						\
1892 	if (optval) {							\
1893 		if ((unp->unp_flags & (exclusive)) != 0) {		\
1894 			UNP_PCB_UNLOCK(unp);				\
1895 			error = EINVAL;					\
1896 			break;						\
1897 		}							\
1898 		unp->unp_flags |= (bit);				\
1899 	} else								\
1900 		unp->unp_flags &= ~(bit);				\
1901 	UNP_PCB_UNLOCK(unp);						\
1902 } while (0)
1903 
1904 			switch (sopt->sopt_name) {
1905 			case LOCAL_CREDS:
1906 				OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS);
1907 				break;
1908 
1909 			case LOCAL_CREDS_PERSISTENT:
1910 				OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT);
1911 				break;
1912 
1913 			case LOCAL_CONNWAIT:
1914 				OPTSET(UNP_CONNWAIT, 0);
1915 				break;
1916 
1917 			default:
1918 				break;
1919 			}
1920 			break;
1921 #undef	OPTSET
1922 		default:
1923 			error = ENOPROTOOPT;
1924 			break;
1925 		}
1926 		break;
1927 
1928 	default:
1929 		error = EOPNOTSUPP;
1930 		break;
1931 	}
1932 	return (error);
1933 }
1934 
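/*
 * Illustrative sketch (userspace): the LOCAL_PEERCRED option handled
 * above is read with getsockopt(2) at level SOL_LOCAL (<sys/un.h>).
 * "fd" is a hypothetical connected or socketpair(2) descriptor.
 *
 *	#include <sys/ucred.h>
 *	#include <sys/un.h>
 *
 *	struct xucred xu;
 *	socklen_t len = sizeof(xu);
 *
 *	if (getsockopt(fd, SOL_LOCAL, LOCAL_PEERCRED, &xu, &len) == 0 &&
 *	    xu.cr_version == XUCRED_VERSION)
 *		printf("peer euid %u pid %d\n", xu.cr_uid, xu.cr_pid);
 */
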
1935 static int
1936 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1937 {
1938 
1939 	return (unp_connectat(AT_FDCWD, so, nam, td, false));
1940 }
1941 
1942 static int
1943 unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
1944     struct thread *td, bool return_locked)
1945 {
1946 	struct mtx *vplock;
1947 	struct sockaddr_un *soun;
1948 	struct vnode *vp;
1949 	struct socket *so2;
1950 	struct unpcb *unp, *unp2, *unp3;
1951 	struct nameidata nd;
1952 	char buf[SOCK_MAXADDRLEN];
1953 	struct sockaddr *sa;
1954 	cap_rights_t rights;
1955 	int error, len;
1956 	bool connreq;
1957 
1958 	if (nam->sa_family != AF_UNIX)
1959 		return (EAFNOSUPPORT);
1960 	if (nam->sa_len > sizeof(struct sockaddr_un))
1961 		return (EINVAL);
1962 	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
1963 	if (len <= 0)
1964 		return (EINVAL);
1965 	soun = (struct sockaddr_un *)nam;
1966 	bcopy(soun->sun_path, buf, len);
1967 	buf[len] = 0;
1968 
1969 	error = 0;
1970 	unp = sotounpcb(so);
1971 	UNP_PCB_LOCK(unp);
1972 	for (;;) {
1973 		/*
1974 		 * Wait for connection state to stabilize.  If a connection
1975 		 * already exists, give up.  For datagram sockets, which permit
1976 		 * multiple consecutive connect(2) calls, upper layers are
1977 		 * responsible for disconnecting in advance of a subsequent
1978 		 * connect(2), but this is not synchronized with PCB connection
1979 		 * state.
1980 		 *
1981 		 * Also make sure that no threads are currently attempting to
1982 		 * lock the peer socket, to ensure that unp_conn cannot
1983 		 * transition between two valid sockets while locks are dropped.
1984 		 */
1985 		if (SOLISTENING(so))
1986 			error = EOPNOTSUPP;
1987 		else if (unp->unp_conn != NULL)
1988 			error = EISCONN;
1989 		else if ((unp->unp_flags & UNP_CONNECTING) != 0) {
1990 			error = EALREADY;
1991 		}
1992 		if (error != 0) {
1993 			UNP_PCB_UNLOCK(unp);
1994 			return (error);
1995 		}
1996 		if (unp->unp_pairbusy > 0) {
1997 			unp->unp_flags |= UNP_WAITING;
1998 			mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0);
1999 			continue;
2000 		}
2001 		break;
2002 	}
2003 	unp->unp_flags |= UNP_CONNECTING;
2004 	UNP_PCB_UNLOCK(unp);
2005 
2006 	connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0;
2007 	if (connreq)
2008 		sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
2009 	else
2010 		sa = NULL;
2011 	NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF,
2012 	    UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT));
2013 	error = namei(&nd);
2014 	if (error)
2015 		vp = NULL;
2016 	else
2017 		vp = nd.ni_vp;
2018 	ASSERT_VOP_LOCKED(vp, "unp_connect");
2019 	NDFREE_NOTHING(&nd);
2020 	if (error)
2021 		goto bad;
2022 
2023 	if (vp->v_type != VSOCK) {
2024 		error = ENOTSOCK;
2025 		goto bad;
2026 	}
2027 #ifdef MAC
2028 	error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD);
2029 	if (error)
2030 		goto bad;
2031 #endif
2032 	error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td);
2033 	if (error)
2034 		goto bad;
2035 
2036 	unp = sotounpcb(so);
2037 	KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
2038 
2039 	vplock = mtx_pool_find(mtxpool_sleep, vp);
2040 	mtx_lock(vplock);
2041 	VOP_UNP_CONNECT(vp, &unp2);
2042 	if (unp2 == NULL) {
2043 		error = ECONNREFUSED;
2044 		goto bad2;
2045 	}
2046 	so2 = unp2->unp_socket;
2047 	if (so->so_type != so2->so_type) {
2048 		error = EPROTOTYPE;
2049 		goto bad2;
2050 	}
2051 	if (connreq) {
2052 		if (SOLISTENING(so2)) {
2053 			CURVNET_SET(so2->so_vnet);
2054 			so2 = sonewconn(so2, 0);
2055 			CURVNET_RESTORE();
2056 		} else
2057 			so2 = NULL;
2058 		if (so2 == NULL) {
2059 			error = ECONNREFUSED;
2060 			goto bad2;
2061 		}
2062 		unp3 = sotounpcb(so2);
2063 		unp_pcb_lock_pair(unp2, unp3);
2064 		if (unp2->unp_addr != NULL) {
2065 			bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
2066 			unp3->unp_addr = (struct sockaddr_un *) sa;
2067 			sa = NULL;
2068 		}
2069 
2070 		unp_copy_peercred(td, unp3, unp, unp2);
2071 
2072 		UNP_PCB_UNLOCK(unp2);
2073 		unp2 = unp3;
2074 
2075 		/*
2076 		 * It is safe to block on the PCB lock here since unp2 is
2077 		 * nascent and cannot be connected to any other sockets.
2078 		 */
2079 		UNP_PCB_LOCK(unp);
2080 #ifdef MAC
2081 		mac_socketpeer_set_from_socket(so, so2);
2082 		mac_socketpeer_set_from_socket(so2, so);
2083 #endif
2084 	} else {
2085 		unp_pcb_lock_pair(unp, unp2);
2086 	}
2087 	KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 &&
2088 	    sotounpcb(so2) == unp2,
2089 	    ("%s: unp2 %p so2 %p", __func__, unp2, so2));
2090 	unp_connect2(so, so2, PRU_CONNECT);
2091 	KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
2092 	    ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
2093 	unp->unp_flags &= ~UNP_CONNECTING;
2094 	if (!return_locked)
2095 		unp_pcb_unlock_pair(unp, unp2);
2096 bad2:
2097 	mtx_unlock(vplock);
2098 bad:
2099 	if (vp != NULL) {
2100 		/*
2101 		 * If we are returning locked (called via uipc_sosend_dgram()),
2102 		 * we need to be sure that vput() won't sleep.  This is
2103 		 * guaranteed by the VOP_UNP_CONNECT() call above and unp2 lock.
2104 		 * SOCK_STREAM/SEQPACKET can't request return_locked (yet).
2105 		 */
2106 		MPASS(!(return_locked && connreq));
2107 		vput(vp);
2108 	}
2109 	free(sa, M_SONAME);
2110 	if (__predict_false(error)) {
2111 		UNP_PCB_LOCK(unp);
2112 		KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
2113 		    ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
2114 		unp->unp_flags &= ~UNP_CONNECTING;
2115 		UNP_PCB_UNLOCK(unp);
2116 	}
2117 	return (error);
2118 }
2119 
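/*
 * Illustrative sketch (userspace): the name lookup performed by
 * unp_connectat() above resolves a path supplied by an ordinary
 * connect(2); connectat(2) additionally supplies the directory
 * descriptor used in place of AT_FDCWD.  The path below is made up.
 *
 *	struct sockaddr_un sun;
 *	int fd;
 *
 *	fd = socket(PF_LOCAL, SOCK_STREAM, 0);
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	sun.sun_len = sizeof(sun);
 *	strlcpy(sun.sun_path, "/var/run/demo.sock", sizeof(sun.sun_path));
 *	if (connect(fd, (struct sockaddr *)&sun, sizeof(sun)) != 0)
 *		err(1, "connect");
 */
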
2120 /*
2121  * Set socket peer credentials at connection time.
2122  *
2123  * The client's PCB credentials are copied from its process structure.  The
2124  * server's PCB credentials are copied from the socket on which it called
2125  * listen(2).  uipc_listen cached that process's credentials at the time.
2126  */
2127 void
2128 unp_copy_peercred(struct thread *td, struct unpcb *client_unp,
2129     struct unpcb *server_unp, struct unpcb *listen_unp)
2130 {
2131 	cru2xt(td, &client_unp->unp_peercred);
2132 	client_unp->unp_flags |= UNP_HAVEPC;
2133 
2134 	memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred,
2135 	    sizeof(server_unp->unp_peercred));
2136 	server_unp->unp_flags |= UNP_HAVEPC;
2137 	client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK);
2138 }
2139 
2140 static void
2141 unp_connect2(struct socket *so, struct socket *so2, int req)
2142 {
2143 	struct unpcb *unp;
2144 	struct unpcb *unp2;
2145 
2146 	MPASS(so2->so_type == so->so_type);
2147 	unp = sotounpcb(so);
2148 	KASSERT(unp != NULL, ("unp_connect2: unp == NULL"));
2149 	unp2 = sotounpcb(so2);
2150 	KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
2151 
2152 	UNP_PCB_LOCK_ASSERT(unp);
2153 	UNP_PCB_LOCK_ASSERT(unp2);
2154 	KASSERT(unp->unp_conn == NULL,
2155 	    ("%s: socket %p is already connected", __func__, unp));
2156 
2157 	unp->unp_conn = unp2;
2158 	unp_pcb_hold(unp2);
2159 	unp_pcb_hold(unp);
2160 	switch (so->so_type) {
2161 	case SOCK_DGRAM:
2162 		UNP_REF_LIST_LOCK();
2163 		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
2164 		UNP_REF_LIST_UNLOCK();
2165 		soisconnected(so);
2166 		break;
2167 
2168 	case SOCK_STREAM:
2169 	case SOCK_SEQPACKET:
2170 		KASSERT(unp2->unp_conn == NULL,
2171 		    ("%s: socket %p is already connected", __func__, unp2));
2172 		unp2->unp_conn = unp;
2173 		if (req == PRU_CONNECT &&
2174 		    ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT))
2175 			soisconnecting(so);
2176 		else
2177 			soisconnected(so);
2178 		soisconnected(so2);
2179 		break;
2180 
2181 	default:
2182 		panic("unp_connect2");
2183 	}
2184 }
2185 
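/*
 * Illustrative sketch (userspace): unp_connect2() is also reached, via
 * uipc_connect2(), from socketpair(2), which wires two fresh PCBs to
 * each other without touching the file system name space:
 *
 *	int sv[2];
 *
 *	if (socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sv) != 0)
 *		err(1, "socketpair");
 */
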
2186 static void
2187 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
2188 {
2189 	struct socket *so, *so2;
2190 	struct mbuf *m = NULL;
2191 #ifdef INVARIANTS
2192 	struct unpcb *unptmp;
2193 #endif
2194 
2195 	UNP_PCB_LOCK_ASSERT(unp);
2196 	UNP_PCB_LOCK_ASSERT(unp2);
2197 	KASSERT(unp->unp_conn == unp2,
2198 	    ("%s: unpcb %p is not connected to %p", __func__, unp, unp2));
2199 
2200 	unp->unp_conn = NULL;
2201 	so = unp->unp_socket;
2202 	so2 = unp2->unp_socket;
2203 	switch (unp->unp_socket->so_type) {
2204 	case SOCK_DGRAM:
2205 		/*
2206 		 * Remove our send socket buffer from the peer's receive buffer.
2207 		 * Move the data to the peer's receive buffer only if it is empty.
2208 		 * This is a protection against a scenario where a peer
2209 		 * connects, floods and disconnects, effectively blocking
2210 		 * sendto() from unconnected sockets.
2211 		 */
2212 		SOCK_RECVBUF_LOCK(so2);
2213 		if (!STAILQ_EMPTY(&so->so_snd.uxdg_mb)) {
2214 			TAILQ_REMOVE(&so2->so_rcv.uxdg_conns, &so->so_snd,
2215 			    uxdg_clist);
2216 			if (__predict_true((so2->so_rcv.sb_state &
2217 			    SBS_CANTRCVMORE) == 0) &&
2218 			    STAILQ_EMPTY(&so2->so_rcv.uxdg_mb)) {
2219 				STAILQ_CONCAT(&so2->so_rcv.uxdg_mb,
2220 				    &so->so_snd.uxdg_mb);
2221 				so2->so_rcv.uxdg_cc += so->so_snd.uxdg_cc;
2222 				so2->so_rcv.uxdg_ctl += so->so_snd.uxdg_ctl;
2223 				so2->so_rcv.uxdg_mbcnt += so->so_snd.uxdg_mbcnt;
2224 			} else {
2225 				m = STAILQ_FIRST(&so->so_snd.uxdg_mb);
2226 				STAILQ_INIT(&so->so_snd.uxdg_mb);
2227 				so2->so_rcv.sb_acc -= so->so_snd.uxdg_cc;
2228 				so2->so_rcv.sb_ccc -= so->so_snd.uxdg_cc;
2229 				so2->so_rcv.sb_ctl -= so->so_snd.uxdg_ctl;
2230 				so2->so_rcv.sb_mbcnt -= so->so_snd.uxdg_mbcnt;
2231 			}
2232 			/* Note: so may reconnect. */
2233 			so->so_snd.uxdg_cc = 0;
2234 			so->so_snd.uxdg_ctl = 0;
2235 			so->so_snd.uxdg_mbcnt = 0;
2236 		}
2237 		SOCK_RECVBUF_UNLOCK(so2);
2238 		UNP_REF_LIST_LOCK();
2239 #ifdef INVARIANTS
2240 		LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) {
2241 			if (unptmp == unp)
2242 				break;
2243 		}
2244 		KASSERT(unptmp != NULL,
2245 		    ("%s: %p not found in reflist of %p", __func__, unp, unp2));
2246 #endif
2247 		LIST_REMOVE(unp, unp_reflink);
2248 		UNP_REF_LIST_UNLOCK();
2249 		if (so) {
2250 			SOCK_LOCK(so);
2251 			so->so_state &= ~SS_ISCONNECTED;
2252 			SOCK_UNLOCK(so);
2253 		}
2254 		break;
2255 
2256 	case SOCK_STREAM:
2257 	case SOCK_SEQPACKET:
2258 		if (so)
2259 			soisdisconnected(so);
2260 		MPASS(unp2->unp_conn == unp);
2261 		unp2->unp_conn = NULL;
2262 		if (so2)
2263 			soisdisconnected(so2);
2264 		break;
2265 	}
2266 
2267 	if (unp == unp2) {
2268 		unp_pcb_rele_notlast(unp);
2269 		if (!unp_pcb_rele(unp))
2270 			UNP_PCB_UNLOCK(unp);
2271 	} else {
2272 		if (!unp_pcb_rele(unp))
2273 			UNP_PCB_UNLOCK(unp);
2274 		if (!unp_pcb_rele(unp2))
2275 			UNP_PCB_UNLOCK(unp2);
2276 	}
2277 
2278 	if (m != NULL) {
2279 		unp_scan(m, unp_freerights);
2280 		m_freem(m);
2281 	}
2282 }
2283 
2284 /*
2285  * unp_pcblist() walks the global list of struct unpcb's to generate a
2286  * pointer list, bumping the refcount on each unpcb.  It then copies them out
2287  * sequentially, validating the generation number on each to see if it has
2288  * been detached.  All of this is necessary because copyout() may sleep on
2289  * disk I/O.
2290  */
2291 static int
2292 unp_pcblist(SYSCTL_HANDLER_ARGS)
2293 {
2294 	struct unpcb *unp, **unp_list;
2295 	unp_gen_t gencnt;
2296 	struct xunpgen *xug;
2297 	struct unp_head *head;
2298 	struct xunpcb *xu;
2299 	u_int i;
2300 	int error, n;
2301 
2302 	switch ((intptr_t)arg1) {
2303 	case SOCK_STREAM:
2304 		head = &unp_shead;
2305 		break;
2306 
2307 	case SOCK_DGRAM:
2308 		head = &unp_dhead;
2309 		break;
2310 
2311 	case SOCK_SEQPACKET:
2312 		head = &unp_sphead;
2313 		break;
2314 
2315 	default:
2316 		panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1);
2317 	}
2318 
2319 	/*
2320 	 * The process of preparing the PCB list is too time-consuming and
2321 	 * resource-intensive to repeat twice on every request.
2322 	 */
2323 	if (req->oldptr == NULL) {
2324 		n = unp_count;
2325 		req->oldidx = 2 * (sizeof *xug)
2326 			+ (n + n/8) * sizeof(struct xunpcb);
2327 		return (0);
2328 	}
2329 
2330 	if (req->newptr != NULL)
2331 		return (EPERM);
2332 
2333 	/*
2334 	 * OK, now we're committed to doing something.
2335 	 */
2336 	xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO);
2337 	UNP_LINK_RLOCK();
2338 	gencnt = unp_gencnt;
2339 	n = unp_count;
2340 	UNP_LINK_RUNLOCK();
2341 
2342 	xug->xug_len = sizeof *xug;
2343 	xug->xug_count = n;
2344 	xug->xug_gen = gencnt;
2345 	xug->xug_sogen = so_gencnt;
2346 	error = SYSCTL_OUT(req, xug, sizeof *xug);
2347 	if (error) {
2348 		free(xug, M_TEMP);
2349 		return (error);
2350 	}
2351 
2352 	unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
2353 
2354 	UNP_LINK_RLOCK();
2355 	for (unp = LIST_FIRST(head), i = 0; unp && i < n;
2356 	     unp = LIST_NEXT(unp, unp_link)) {
2357 		UNP_PCB_LOCK(unp);
2358 		if (unp->unp_gencnt <= gencnt) {
2359 			if (cr_cansee(req->td->td_ucred,
2360 			    unp->unp_socket->so_cred)) {
2361 				UNP_PCB_UNLOCK(unp);
2362 				continue;
2363 			}
2364 			unp_list[i++] = unp;
2365 			unp_pcb_hold(unp);
2366 		}
2367 		UNP_PCB_UNLOCK(unp);
2368 	}
2369 	UNP_LINK_RUNLOCK();
2370 	n = i;			/* In case we lost some during malloc. */
2371 
2372 	error = 0;
2373 	xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO);
2374 	for (i = 0; i < n; i++) {
2375 		unp = unp_list[i];
2376 		UNP_PCB_LOCK(unp);
2377 		if (unp_pcb_rele(unp))
2378 			continue;
2379 
2380 		if (unp->unp_gencnt <= gencnt) {
2381 			xu->xu_len = sizeof *xu;
2382 			xu->xu_unpp = (uintptr_t)unp;
2383 			/*
2384 			 * XXX - need more locking here to protect against
2385 			 * connect/disconnect races for SMP.
2386 			 */
2387 			if (unp->unp_addr != NULL)
2388 				bcopy(unp->unp_addr, &xu->xu_addr,
2389 				      unp->unp_addr->sun_len);
2390 			else
2391 				bzero(&xu->xu_addr, sizeof(xu->xu_addr));
2392 			if (unp->unp_conn != NULL &&
2393 			    unp->unp_conn->unp_addr != NULL)
2394 				bcopy(unp->unp_conn->unp_addr,
2395 				      &xu->xu_caddr,
2396 				      unp->unp_conn->unp_addr->sun_len);
2397 			else
2398 				bzero(&xu->xu_caddr, sizeof(xu->xu_caddr));
2399 			xu->unp_vnode = (uintptr_t)unp->unp_vnode;
2400 			xu->unp_conn = (uintptr_t)unp->unp_conn;
2401 			xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs);
2402 			xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink);
2403 			xu->unp_gencnt = unp->unp_gencnt;
2404 			sotoxsocket(unp->unp_socket, &xu->xu_socket);
2405 			UNP_PCB_UNLOCK(unp);
2406 			error = SYSCTL_OUT(req, xu, sizeof *xu);
2407 		} else {
2408 			UNP_PCB_UNLOCK(unp);
2409 		}
2410 	}
2411 	free(xu, M_TEMP);
2412 	if (!error) {
2413 		/*
2414 		 * Give the user an updated idea of our state.  If the
2415 		 * generation differs from what we told her before, she knows
2416 		 * that something happened while we were processing this
2417 		 * request, and it might be necessary to retry.
2418 		 */
2419 		xug->xug_gen = unp_gencnt;
2420 		xug->xug_sogen = so_gencnt;
2421 		xug->xug_count = unp_count;
2422 		error = SYSCTL_OUT(req, xug, sizeof *xug);
2423 	}
2424 	free(unp_list, M_TEMP);
2425 	free(xug, M_TEMP);
2426 	return (error);
2427 }
2428 
2429 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist,
2430     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2431     (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
2432     "List of active local datagram sockets");
2433 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist,
2434     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2435     (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
2436     "List of active local stream sockets");
2437 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist,
2438     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2439     (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
2440     "List of active local seqpacket sockets");
2441 
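/*
 * Illustrative sketch (userspace): the pcblist sysctls above are what
 * netstat(1) consumes.  A minimal reader, eliding the per-socket
 * struct xunpcb records that follow the leading struct xunpgen:
 *
 *	void *buf = NULL;
 *	size_t len = 0;
 *
 *	if (sysctlbyname("net.local.stream.pcblist", NULL, &len,
 *	    NULL, 0) == 0 && (buf = malloc(len)) != NULL &&
 *	    sysctlbyname("net.local.stream.pcblist", buf, &len,
 *	    NULL, 0) == 0)
 *		printf("%u stream pcbs\n",
 *		    ((struct xunpgen *)buf)->xug_count);
 *	free(buf);
 */
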
2442 static void
2443 unp_shutdown(struct unpcb *unp)
2444 {
2445 	struct unpcb *unp2;
2446 	struct socket *so;
2447 
2448 	UNP_PCB_LOCK_ASSERT(unp);
2449 
2450 	unp2 = unp->unp_conn;
2451 	if ((unp->unp_socket->so_type == SOCK_STREAM ||
2452 	    unp->unp_socket->so_type == SOCK_SEQPACKET) && unp2 != NULL) {
2453 		so = unp2->unp_socket;
2454 		if (so != NULL)
2455 			socantrcvmore(so);
2456 	}
2457 }
2458 
2459 static void
2460 unp_drop(struct unpcb *unp)
2461 {
2462 	struct socket *so;
2463 	struct unpcb *unp2;
2464 
2465 	/*
2466 	 * Regardless of whether the socket's peer dropped the connection
2467 	 * with this socket by aborting or disconnecting, POSIX requires
2468 	 * that ECONNRESET is returned.
2469 	 */
2470 
2471 	UNP_PCB_LOCK(unp);
2472 	so = unp->unp_socket;
2473 	if (so)
2474 		so->so_error = ECONNRESET;
2475 	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
2476 		/* Last reference dropped in unp_disconnect(). */
2477 		unp_pcb_rele_notlast(unp);
2478 		unp_disconnect(unp, unp2);
2479 	} else if (!unp_pcb_rele(unp)) {
2480 		UNP_PCB_UNLOCK(unp);
2481 	}
2482 }
2483 
2484 static void
2485 unp_freerights(struct filedescent **fdep, int fdcount)
2486 {
2487 	struct file *fp;
2488 	int i;
2489 
2490 	KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount));
2491 
2492 	for (i = 0; i < fdcount; i++) {
2493 		fp = fdep[i]->fde_file;
2494 		filecaps_free(&fdep[i]->fde_caps);
2495 		unp_discard(fp);
2496 	}
2497 	free(fdep[0], M_FILECAPS);
2498 }
2499 
2500 static int
2501 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags)
2502 {
2503 	struct thread *td = curthread;		/* XXX */
2504 	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
2505 	int i;
2506 	int *fdp;
2507 	struct filedesc *fdesc = td->td_proc->p_fd;
2508 	struct filedescent **fdep;
2509 	void *data;
2510 	socklen_t clen = control->m_len, datalen;
2511 	int error, newfds;
2512 	u_int newlen;
2513 
2514 	UNP_LINK_UNLOCK_ASSERT();
2515 
2516 	error = 0;
2517 	if (controlp != NULL) /* controlp == NULL => free control messages */
2518 		*controlp = NULL;
2519 	while (cm != NULL) {
2520 		MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len);
2521 
2522 		data = CMSG_DATA(cm);
2523 		datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
2524 		if (cm->cmsg_level == SOL_SOCKET
2525 		    && cm->cmsg_type == SCM_RIGHTS) {
2526 			newfds = datalen / sizeof(*fdep);
2527 			if (newfds == 0)
2528 				goto next;
2529 			fdep = data;
2530 
2531 			/* If we're not outputting the descriptors, free them. */
2532 			if (error || controlp == NULL) {
2533 				unp_freerights(fdep, newfds);
2534 				goto next;
2535 			}
2536 			FILEDESC_XLOCK(fdesc);
2537 
2538 			/*
2539 			 * Now change each pointer to an fd in the global
2540 			 * table to an integer that is the index to the local
2541 			 * fd table entry that we set up to point to the
2542 			 * global one we are transferring.
2543 			 */
2544 			newlen = newfds * sizeof(int);
2545 			*controlp = sbcreatecontrol(NULL, newlen,
2546 			    SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2547 
2548 			fdp = (int *)
2549 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2550 			if (fdallocn(td, 0, fdp, newfds) != 0) {
2551 				FILEDESC_XUNLOCK(fdesc);
2552 				error = EMSGSIZE;
2553 				unp_freerights(fdep, newfds);
2554 				m_freem(*controlp);
2555 				*controlp = NULL;
2556 				goto next;
2557 			}
2558 			for (i = 0; i < newfds; i++, fdp++) {
2559 				_finstall(fdesc, fdep[i]->fde_file, *fdp,
2560 				    (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0,
2561 				    &fdep[i]->fde_caps);
2562 				unp_externalize_fp(fdep[i]->fde_file);
2563 			}
2564 
2565 			/*
2566 			 * The new type indicates that the mbuf data refers to
2567 			 * kernel resources that may need to be released before
2568 			 * the mbuf is freed.
2569 			 */
2570 			m_chtype(*controlp, MT_EXTCONTROL);
2571 			FILEDESC_XUNLOCK(fdesc);
2572 			free(fdep[0], M_FILECAPS);
2573 		} else {
2574 			/* We can just copy anything else across. */
2575 			if (error || controlp == NULL)
2576 				goto next;
2577 			*controlp = sbcreatecontrol(NULL, datalen,
2578 			    cm->cmsg_type, cm->cmsg_level, M_WAITOK);
2579 			bcopy(data,
2580 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
2581 			    datalen);
2582 		}
2583 		controlp = &(*controlp)->m_next;
2584 
2585 next:
2586 		if (CMSG_SPACE(datalen) < clen) {
2587 			clen -= CMSG_SPACE(datalen);
2588 			cm = (struct cmsghdr *)
2589 			    ((caddr_t)cm + CMSG_SPACE(datalen));
2590 		} else {
2591 			clen = 0;
2592 			cm = NULL;
2593 		}
2594 	}
2595 
2596 	m_freem(control);
2597 	return (error);
2598 }
2599 
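/*
 * Illustrative sketch (userspace): receiving a descriptor that
 * unp_externalize() above has installed into the caller's fd table.
 * "fd" is a hypothetical UNIX domain socket.
 *
 *	union {
 *		struct cmsghdr hdr;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} cmsg;
 *	char byte;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cmsg.buf,
 *		.msg_controllen = sizeof(cmsg.buf),
 *	};
 *	struct cmsghdr *cm;
 *	int newfd = -1;
 *
 *	if (recvmsg(fd, &msg, 0) >= 0 &&
 *	    (cm = CMSG_FIRSTHDR(&msg)) != NULL &&
 *	    cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS)
 *		memcpy(&newfd, CMSG_DATA(cm), sizeof(newfd));
 */
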
2600 static void
2601 unp_zone_change(void *tag)
2602 {
2603 
2604 	uma_zone_set_max(unp_zone, maxsockets);
2605 }
2606 
2607 #ifdef INVARIANTS
2608 static void
2609 unp_zdtor(void *mem, int size __unused, void *arg __unused)
2610 {
2611 	struct unpcb *unp;
2612 
2613 	unp = mem;
2614 
2615 	KASSERT(LIST_EMPTY(&unp->unp_refs),
2616 	    ("%s: unpcb %p has lingering refs", __func__, unp));
2617 	KASSERT(unp->unp_socket == NULL,
2618 	    ("%s: unpcb %p has socket backpointer", __func__, unp));
2619 	KASSERT(unp->unp_vnode == NULL,
2620 	    ("%s: unpcb %p has vnode references", __func__, unp));
2621 	KASSERT(unp->unp_conn == NULL,
2622 	    ("%s: unpcb %p is still connected", __func__, unp));
2623 	KASSERT(unp->unp_addr == NULL,
2624 	    ("%s: unpcb %p has leaked addr", __func__, unp));
2625 }
2626 #endif
2627 
2628 static void
2629 unp_init(void *arg __unused)
2630 {
2631 	uma_dtor dtor;
2632 
2633 #ifdef INVARIANTS
2634 	dtor = unp_zdtor;
2635 #else
2636 	dtor = NULL;
2637 #endif
2638 	unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor,
2639 	    NULL, NULL, UMA_ALIGN_CACHE, 0);
2640 	uma_zone_set_max(unp_zone, maxsockets);
2641 	uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached");
2642 	EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change,
2643 	    NULL, EVENTHANDLER_PRI_ANY);
2644 	LIST_INIT(&unp_dhead);
2645 	LIST_INIT(&unp_shead);
2646 	LIST_INIT(&unp_sphead);
2647 	SLIST_INIT(&unp_defers);
2648 	TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL);
2649 	TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL);
2650 	UNP_LINK_LOCK_INIT();
2651 	UNP_DEFERRED_LOCK_INIT();
2652 }
2653 SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL);
2654 
2655 static void
2656 unp_internalize_cleanup_rights(struct mbuf *control)
2657 {
2658 	struct cmsghdr *cp;
2659 	struct mbuf *m;
2660 	void *data;
2661 	socklen_t datalen;
2662 
2663 	for (m = control; m != NULL; m = m->m_next) {
2664 		cp = mtod(m, struct cmsghdr *);
2665 		if (cp->cmsg_level != SOL_SOCKET ||
2666 		    cp->cmsg_type != SCM_RIGHTS)
2667 			continue;
2668 		data = CMSG_DATA(cp);
2669 		datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data;
2670 		unp_freerights(data, datalen / sizeof(struct filedesc *));
2671 	}
2672 }
2673 
2674 static int
2675 unp_internalize(struct mbuf **controlp, struct thread *td,
2676     struct mbuf **clast, u_int *space, u_int *mbcnt)
2677 {
2678 	struct mbuf *control, **initial_controlp;
2679 	struct proc *p;
2680 	struct filedesc *fdesc;
2681 	struct bintime *bt;
2682 	struct cmsghdr *cm;
2683 	struct cmsgcred *cmcred;
2684 	struct filedescent *fde, **fdep, *fdev;
2685 	struct file *fp;
2686 	struct timeval *tv;
2687 	struct timespec *ts;
2688 	void *data;
2689 	socklen_t clen, datalen;
2690 	int i, j, error, *fdp, oldfds;
2691 	u_int newlen;
2692 
2693 	MPASS((*controlp)->m_next == NULL); /* COMPAT_OLDSOCK may violate */
2694 	UNP_LINK_UNLOCK_ASSERT();
2695 
2696 	p = td->td_proc;
2697 	fdesc = p->p_fd;
2698 	error = 0;
2699 	control = *controlp;
2700 	*controlp = NULL;
2701 	initial_controlp = controlp;
2702 	for (clen = control->m_len, cm = mtod(control, struct cmsghdr *),
2703 	    data = CMSG_DATA(cm);
2704 
2705 	    clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET &&
2706 	    clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) &&
2707 	    (char *)cm + cm->cmsg_len >= (char *)data;
2708 
2709 	    clen -= min(CMSG_SPACE(datalen), clen),
2710 	    cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)),
2711 	    data = CMSG_DATA(cm)) {
2712 		datalen = (char *)cm + cm->cmsg_len - (char *)data;
2713 		switch (cm->cmsg_type) {
2714 		case SCM_CREDS:
2715 			*controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
2716 			    SCM_CREDS, SOL_SOCKET, M_WAITOK);
2717 			cmcred = (struct cmsgcred *)
2718 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2719 			cmcred->cmcred_pid = p->p_pid;
2720 			cmcred->cmcred_uid = td->td_ucred->cr_ruid;
2721 			cmcred->cmcred_gid = td->td_ucred->cr_rgid;
2722 			cmcred->cmcred_euid = td->td_ucred->cr_uid;
2723 			cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
2724 			    CMGROUP_MAX);
2725 			for (i = 0; i < cmcred->cmcred_ngroups; i++)
2726 				cmcred->cmcred_groups[i] =
2727 				    td->td_ucred->cr_groups[i];
2728 			break;
2729 
2730 		case SCM_RIGHTS:
2731 			oldfds = datalen / sizeof (int);
2732 			if (oldfds == 0)
2733 				continue;
2734 			/* On some machines sizeof(pointer) is bigger than
2735 			 * sizeof(int), so we need to check whether the data
2736 			 * fits into a single mbuf.  We could allocate several
2737 			 * mbufs, and unp_externalize() should even handle that
2738 			 * properly.  But it is not worth complicating the code
2739 			 * for the insane scenario of passing over 200 file
2740 			 * descriptors at once.
2741 			 */
2742 			newlen = oldfds * sizeof(fdep[0]);
2743 			if (CMSG_SPACE(newlen) > MCLBYTES) {
2744 				error = EMSGSIZE;
2745 				goto out;
2746 			}
2747 			/*
2748 			 * Check that all the FDs passed in refer to legal
2749 			 * files.  If not, reject the entire operation.
2750 			 */
2751 			fdp = data;
2752 			FILEDESC_SLOCK(fdesc);
2753 			for (i = 0; i < oldfds; i++, fdp++) {
2754 				fp = fget_noref(fdesc, *fdp);
2755 				if (fp == NULL) {
2756 					FILEDESC_SUNLOCK(fdesc);
2757 					error = EBADF;
2758 					goto out;
2759 				}
2760 				if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) {
2761 					FILEDESC_SUNLOCK(fdesc);
2762 					error = EOPNOTSUPP;
2763 					goto out;
2764 				}
2765 			}
2766 
2767 			/*
2768 			 * Now replace the integer FDs with pointers to the
2769 			 * file structure and capability rights.
2770 			 */
2771 			*controlp = sbcreatecontrol(NULL, newlen,
2772 			    SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2773 			fdp = data;
2774 			for (i = 0; i < oldfds; i++, fdp++) {
2775 				if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) {
2776 					fdp = data;
2777 					for (j = 0; j < i; j++, fdp++) {
2778 						fdrop(fdesc->fd_ofiles[*fdp].
2779 						    fde_file, td);
2780 					}
2781 					FILEDESC_SUNLOCK(fdesc);
2782 					error = EBADF;
2783 					goto out;
2784 				}
2785 			}
2786 			fdp = data;
2787 			fdep = (struct filedescent **)
2788 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2789 			fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS,
2790 			    M_WAITOK);
2791 			for (i = 0; i < oldfds; i++, fdev++, fdp++) {
2792 				fde = &fdesc->fd_ofiles[*fdp];
2793 				fdep[i] = fdev;
2794 				fdep[i]->fde_file = fde->fde_file;
2795 				filecaps_copy(&fde->fde_caps,
2796 				    &fdep[i]->fde_caps, true);
2797 				unp_internalize_fp(fdep[i]->fde_file);
2798 			}
2799 			FILEDESC_SUNLOCK(fdesc);
2800 			break;
2801 
2802 		case SCM_TIMESTAMP:
2803 			*controlp = sbcreatecontrol(NULL, sizeof(*tv),
2804 			    SCM_TIMESTAMP, SOL_SOCKET, M_WAITOK);
2805 			tv = (struct timeval *)
2806 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2807 			microtime(tv);
2808 			break;
2809 
2810 		case SCM_BINTIME:
2811 			*controlp = sbcreatecontrol(NULL, sizeof(*bt),
2812 			    SCM_BINTIME, SOL_SOCKET, M_WAITOK);
2813 			bt = (struct bintime *)
2814 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2815 			bintime(bt);
2816 			break;
2817 
2818 		case SCM_REALTIME:
2819 			*controlp = sbcreatecontrol(NULL, sizeof(*ts),
2820 			    SCM_REALTIME, SOL_SOCKET, M_WAITOK);
2821 			ts = (struct timespec *)
2822 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2823 			nanotime(ts);
2824 			break;
2825 
2826 		case SCM_MONOTONIC:
2827 			*controlp = sbcreatecontrol(NULL, sizeof(*ts),
2828 			    SCM_MONOTONIC, SOL_SOCKET, M_WAITOK);
2829 			ts = (struct timespec *)
2830 			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2831 			nanouptime(ts);
2832 			break;
2833 
2834 		default:
2835 			error = EINVAL;
2836 			goto out;
2837 		}
2838 
2839 		if (space != NULL) {
2840 			*space += (*controlp)->m_len;
2841 			*mbcnt += MSIZE;
2842 			if ((*controlp)->m_flags & M_EXT)
2843 				*mbcnt += (*controlp)->m_ext.ext_size;
2844 			*clast = *controlp;
2845 		}
2846 		controlp = &(*controlp)->m_next;
2847 	}
2848 	if (clen > 0)
2849 		error = EINVAL;
2850 
2851 out:
2852 	if (error != 0 && initial_controlp != NULL)
2853 		unp_internalize_cleanup_rights(*initial_controlp);
2854 	m_freem(control);
2855 	return (error);
2856 }
2857 
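/*
 * Illustrative sketch (userspace): the sending side of SCM_RIGHTS,
 * whose integer descriptors unp_internalize() above converts to
 * struct filedescent pointers.  "sock" and "passfd" are hypothetical.
 *
 *	union {
 *		struct cmsghdr hdr;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} cmsg = { .hdr = {
 *		.cmsg_len = CMSG_LEN(sizeof(int)),
 *		.cmsg_level = SOL_SOCKET,
 *		.cmsg_type = SCM_RIGHTS,
 *	} };
 *	char byte = 0;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cmsg.buf,
 *		.msg_controllen = sizeof(cmsg.buf),
 *	};
 *
 *	memcpy(CMSG_DATA(&cmsg.hdr), &passfd, sizeof(passfd));
 *	if (sendmsg(sock, &msg, 0) < 0)
 *		err(1, "sendmsg");
 */
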
2858 static struct mbuf *
2859 unp_addsockcred(struct thread *td, struct mbuf *control, int mode,
2860     struct mbuf **clast, u_int *space, u_int *mbcnt)
2861 {
2862 	struct mbuf *m, *n, *n_prev;
2863 	const struct cmsghdr *cm;
2864 	int ngroups, i, cmsgtype;
2865 	size_t ctrlsz;
2866 
2867 	ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX);
2868 	if (mode & UNP_WANTCRED_ALWAYS) {
2869 		ctrlsz = SOCKCRED2SIZE(ngroups);
2870 		cmsgtype = SCM_CREDS2;
2871 	} else {
2872 		ctrlsz = SOCKCREDSIZE(ngroups);
2873 		cmsgtype = SCM_CREDS;
2874 	}
2875 
2876 	m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT);
2877 	if (m == NULL)
2878 		return (control);
2879 	MPASS((m->m_flags & M_EXT) == 0 && m->m_next == NULL);
2880 
2881 	if (mode & UNP_WANTCRED_ALWAYS) {
2882 		struct sockcred2 *sc;
2883 
2884 		sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2885 		sc->sc_version = 0;
2886 		sc->sc_pid = td->td_proc->p_pid;
2887 		sc->sc_uid = td->td_ucred->cr_ruid;
2888 		sc->sc_euid = td->td_ucred->cr_uid;
2889 		sc->sc_gid = td->td_ucred->cr_rgid;
2890 		sc->sc_egid = td->td_ucred->cr_gid;
2891 		sc->sc_ngroups = ngroups;
2892 		for (i = 0; i < sc->sc_ngroups; i++)
2893 			sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2894 	} else {
2895 		struct sockcred *sc;
2896 
2897 		sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2898 		sc->sc_uid = td->td_ucred->cr_ruid;
2899 		sc->sc_euid = td->td_ucred->cr_uid;
2900 		sc->sc_gid = td->td_ucred->cr_rgid;
2901 		sc->sc_egid = td->td_ucred->cr_gid;
2902 		sc->sc_ngroups = ngroups;
2903 		for (i = 0; i < sc->sc_ngroups; i++)
2904 			sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2905 	}
2906 
2907 	/*
2908 	 * Unlink SCM_CREDS control messages (struct cmsgcred), since the
2909 	 * just-created SCM_CREDS control message (struct sockcred) has a
2910 	 * different format.
2911 	 */
2912 	if (control != NULL && cmsgtype == SCM_CREDS)
2913 		for (n = control, n_prev = NULL; n != NULL;) {
2914 			cm = mtod(n, struct cmsghdr *);
2915 			if (cm->cmsg_level == SOL_SOCKET &&
2916 			    cm->cmsg_type == SCM_CREDS) {
2917 				if (n_prev == NULL)
2918 					control = n->m_next;
2919 				else
2920 					n_prev->m_next = n->m_next;
2921 				if (space != NULL) {
2922 					MPASS(*space >= n->m_len);
2923 					*space -= n->m_len;
2924 					MPASS(*mbcnt >= MSIZE);
2925 					*mbcnt -= MSIZE;
2926 					if (n->m_flags & M_EXT) {
2927 						MPASS(*mbcnt >=
2928 						    n->m_ext.ext_size);
2929 						*mbcnt -= n->m_ext.ext_size;
2930 					}
2931 					MPASS(clast);
2932 					if (*clast == n) {
2933 						MPASS(n->m_next == NULL);
2934 						if (n_prev == NULL)
2935 							*clast = m;
2936 						else
2937 							*clast = n_prev;
2938 					}
2939 				}
2940 				n = m_free(n);
2941 			} else {
2942 				n_prev = n;
2943 				n = n->m_next;
2944 			}
2945 		}
2946 
2947 	/* Prepend it to the head. */
2948 	m->m_next = control;
2949 	if (space != NULL) {
2950 		*space += m->m_len;
2951 		*mbcnt += MSIZE;
2952 		if (control == NULL)
2953 			*clast = m;
2954 	}
2955 	return (m);
2956 }
2957 
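/*
 * Illustrative sketch (userspace): once LOCAL_CREDS_PERSISTENT is set,
 * unp_addsockcred() above attaches an SCM_CREDS2 control message to
 * every datagram or record read from "fd" (hypothetical):
 *
 *	int on = 1;
 *
 *	if (setsockopt(fd, SOL_LOCAL, LOCAL_CREDS_PERSISTENT,
 *	    &on, sizeof(on)) != 0)
 *		err(1, "setsockopt");
 *
 * Each subsequent recvmsg(2) on the connection then carries a control
 * message with cmsg_type SCM_CREDS2 whose payload is a struct sockcred2
 * (declared in <sys/socket.h>) describing the sender.
 */
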
2958 static struct unpcb *
2959 fptounp(struct file *fp)
2960 {
2961 	struct socket *so;
2962 
2963 	if (fp->f_type != DTYPE_SOCKET)
2964 		return (NULL);
2965 	if ((so = fp->f_data) == NULL)
2966 		return (NULL);
2967 	if (so->so_proto->pr_domain != &localdomain)
2968 		return (NULL);
2969 	return sotounpcb(so);
2970 }
2971 
2972 static void
2973 unp_discard(struct file *fp)
2974 {
2975 	struct unp_defer *dr;
2976 
2977 	if (unp_externalize_fp(fp)) {
2978 		dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK);
2979 		dr->ud_fp = fp;
2980 		UNP_DEFERRED_LOCK();
2981 		SLIST_INSERT_HEAD(&unp_defers, dr, ud_link);
2982 		UNP_DEFERRED_UNLOCK();
2983 		atomic_add_int(&unp_defers_count, 1);
2984 		taskqueue_enqueue(taskqueue_thread, &unp_defer_task);
2985 	} else
2986 		closef_nothread(fp);
2987 }
2988 
2989 static void
2990 unp_process_defers(void *arg __unused, int pending)
2991 {
2992 	struct unp_defer *dr;
2993 	SLIST_HEAD(, unp_defer) drl;
2994 	int count;
2995 
2996 	SLIST_INIT(&drl);
2997 	for (;;) {
2998 		UNP_DEFERRED_LOCK();
2999 		if (SLIST_FIRST(&unp_defers) == NULL) {
3000 			UNP_DEFERRED_UNLOCK();
3001 			break;
3002 		}
3003 		SLIST_SWAP(&unp_defers, &drl, unp_defer);
3004 		UNP_DEFERRED_UNLOCK();
3005 		count = 0;
3006 		while ((dr = SLIST_FIRST(&drl)) != NULL) {
3007 			SLIST_REMOVE_HEAD(&drl, ud_link);
3008 			closef_nothread(dr->ud_fp);
3009 			free(dr, M_TEMP);
3010 			count++;
3011 		}
3012 		atomic_add_int(&unp_defers_count, -count);
3013 	}
3014 }
3015 
3016 static void
3017 unp_internalize_fp(struct file *fp)
3018 {
3019 	struct unpcb *unp;
3020 
3021 	UNP_LINK_WLOCK();
3022 	if ((unp = fptounp(fp)) != NULL) {
3023 		unp->unp_file = fp;
3024 		unp->unp_msgcount++;
3025 	}
3026 	unp_rights++;
3027 	UNP_LINK_WUNLOCK();
3028 }
3029 
3030 static int
3031 unp_externalize_fp(struct file *fp)
3032 {
3033 	struct unpcb *unp;
3034 	int ret;
3035 
3036 	UNP_LINK_WLOCK();
3037 	if ((unp = fptounp(fp)) != NULL) {
3038 		unp->unp_msgcount--;
3039 		ret = 1;
3040 	} else
3041 		ret = 0;
3042 	unp_rights--;
3043 	UNP_LINK_WUNLOCK();
3044 	return (ret);
3045 }
3046 
3047 /*
3048  * unp_marked indicates whether additional sockets were marked reachable
3049  * during a pass through unp_gc(), requiring another pass.  It is local to
3050  * the gc task and does not require explicit synchronization.
3051  */
3052 static int	unp_marked;
3053 
3054 static void
3055 unp_remove_dead_ref(struct filedescent **fdep, int fdcount)
3056 {
3057 	struct unpcb *unp;
3058 	struct file *fp;
3059 	int i;
3060 
3061 	/*
3062 	 * This function can only be called from the gc task.
3063 	 */
3064 	KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
3065 	    ("%s: not on gc callout", __func__));
3066 	UNP_LINK_LOCK_ASSERT();
3067 
3068 	for (i = 0; i < fdcount; i++) {
3069 		fp = fdep[i]->fde_file;
3070 		if ((unp = fptounp(fp)) == NULL)
3071 			continue;
3072 		if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
3073 			continue;
3074 		unp->unp_gcrefs--;
3075 	}
3076 }
3077 
3078 static void
3079 unp_restore_undead_ref(struct filedescent **fdep, int fdcount)
3080 {
3081 	struct unpcb *unp;
3082 	struct file *fp;
3083 	int i;
3084 
3085 	/*
3086 	 * This function can only be called from the gc task.
3087 	 */
3088 	KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
3089 	    ("%s: not on gc callout", __func__));
3090 	UNP_LINK_LOCK_ASSERT();
3091 
3092 	for (i = 0; i < fdcount; i++) {
3093 		fp = fdep[i]->fde_file;
3094 		if ((unp = fptounp(fp)) == NULL)
3095 			continue;
3096 		if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
3097 			continue;
3098 		unp->unp_gcrefs++;
3099 		unp_marked++;
3100 	}
3101 }
3102 
3103 static void
3104 unp_scan_socket(struct socket *so, void (*op)(struct filedescent **, int))
3105 {
3106 	struct sockbuf *sb;
3107 
3108 	SOCK_LOCK_ASSERT(so);
3109 
3110 	if (sotounpcb(so)->unp_gcflag & UNPGC_IGNORE_RIGHTS)
3111 		return;
3112 
3113 	SOCK_RECVBUF_LOCK(so);
3114 	switch (so->so_type) {
3115 	case SOCK_DGRAM:
3116 		unp_scan(STAILQ_FIRST(&so->so_rcv.uxdg_mb), op);
3117 		unp_scan(so->so_rcv.uxdg_peeked, op);
3118 		TAILQ_FOREACH(sb, &so->so_rcv.uxdg_conns, uxdg_clist)
3119 			unp_scan(STAILQ_FIRST(&sb->uxdg_mb), op);
3120 		break;
3121 	case SOCK_STREAM:
3122 	case SOCK_SEQPACKET:
3123 		unp_scan(so->so_rcv.sb_mb, op);
3124 		break;
3125 	}
3126 	SOCK_RECVBUF_UNLOCK(so);
3127 }
3128 
3129 static void
3130 unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int))
3131 {
3132 	struct socket *so, *soa;
3133 
3134 	so = unp->unp_socket;
3135 	SOCK_LOCK(so);
3136 	if (SOLISTENING(so)) {
3137 		/*
3138 		 * Mark all sockets in our accept queue.
3139 		 */
3140 		TAILQ_FOREACH(soa, &so->sol_comp, so_list)
3141 			unp_scan_socket(soa, op);
3142 	} else {
3143 		/*
3144 		 * Mark all sockets we reference with RIGHTS.
3145 		 */
3146 		unp_scan_socket(so, op);
3147 	}
3148 	SOCK_UNLOCK(so);
3149 }
3150 
3151 static int unp_recycled;
3152 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0,
3153     "Number of unreachable sockets claimed by the garbage collector.");
3154 
3155 static int unp_taskcount;
3156 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0,
3157     "Number of times the garbage collector has run.");
3158 
3159 SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0,
3160     "Number of active local sockets.");
3161 
3162 static void
3163 unp_gc(__unused void *arg, int pending)
3164 {
3165 	struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead,
3166 				    NULL };
3167 	struct unp_head **head;
3168 	struct unp_head unp_deadhead;	/* List of potentially-dead sockets. */
3169 	struct file *f, **unref;
3170 	struct unpcb *unp, *unptmp;
3171 	int i, total, unp_unreachable;
3172 
3173 	LIST_INIT(&unp_deadhead);
3174 	unp_taskcount++;
3175 	UNP_LINK_RLOCK();
3176 	/*
3177 	 * First determine which sockets may be in cycles.
3178 	 */
3179 	unp_unreachable = 0;
3180 
3181 	for (head = heads; *head != NULL; head++)
3182 		LIST_FOREACH(unp, *head, unp_link) {
3183 			KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0,
3184 			    ("%s: unp %p has unexpected gc flags 0x%x",
3185 			    __func__, unp, (unsigned int)unp->unp_gcflag));
3186 
3187 			f = unp->unp_file;
3188 
3189 			/*
3190 			 * Check for an unreachable socket potentially in a
3191 			 * cycle.  It must be in a queue as indicated by
3192 			 * msgcount, and this must equal the file reference
3193 			 * count.  Note that when msgcount is 0 the file is
3194 			 * NULL.
3195 			 */
3196 			if (f != NULL && unp->unp_msgcount != 0 &&
3197 			    refcount_load(&f->f_count) == unp->unp_msgcount) {
3198 				LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead);
3199 				unp->unp_gcflag |= UNPGC_DEAD;
3200 				unp->unp_gcrefs = unp->unp_msgcount;
3201 				unp_unreachable++;
3202 			}
3203 		}
3204 
3205 	/*
3206 	 * Scan all sockets previously marked as potentially being in a cycle
3207 	 * and remove the references each socket holds on any UNPGC_DEAD
3208 	 * sockets in its queue.  After this step, all remaining references on
3209 	 * sockets marked UNPGC_DEAD should not be part of any cycle.
3210 	 */
3211 	LIST_FOREACH(unp, &unp_deadhead, unp_dead)
3212 		unp_gc_scan(unp, unp_remove_dead_ref);
3213 
3214 	/*
3215 	 * If a socket still has a positive refcount, it cannot be in a
3216 	 * cycle.  In this case increment refcount of all children iteratively.
3217 	 * Stop the scan once we do a complete loop without discovering
3218 	 * a new reachable socket.
3219 	 */
3220 	do {
3221 		unp_marked = 0;
3222 		LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp)
3223 			if (unp->unp_gcrefs > 0) {
3224 				unp->unp_gcflag &= ~UNPGC_DEAD;
3225 				LIST_REMOVE(unp, unp_dead);
3226 				KASSERT(unp_unreachable > 0,
3227 				    ("%s: unp_unreachable underflow.",
3228 				    __func__));
3229 				unp_unreachable--;
3230 				unp_gc_scan(unp, unp_restore_undead_ref);
3231 			}
3232 	} while (unp_marked);
3233 
3234 	UNP_LINK_RUNLOCK();
3235 
3236 	if (unp_unreachable == 0)
3237 		return;
3238 
3239 	/*
3240 	 * Allocate space for a local array of dead unpcbs.
3241 	 * TODO: can this path be simplified by instead using the local
3242 	 * dead list at unp_deadhead, after taking out references
3243 	 * on the file object and/or unpcb and dropping the link lock?
3244 	 */
3245 	unref = malloc(unp_unreachable * sizeof(struct file *),
3246 	    M_TEMP, M_WAITOK);
3247 
3248 	/*
3249 	 * Iterate looking for sockets which have been specifically marked
3250 	 * as unreachable and store them locally.
3251 	 */
3252 	UNP_LINK_RLOCK();
3253 	total = 0;
3254 	LIST_FOREACH(unp, &unp_deadhead, unp_dead) {
3255 		KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0,
3256 		    ("%s: unp %p not marked UNPGC_DEAD", __func__, unp));
3257 		unp->unp_gcflag &= ~UNPGC_DEAD;
3258 		f = unp->unp_file;
3259 		if (unp->unp_msgcount == 0 || f == NULL ||
3260 		    refcount_load(&f->f_count) != unp->unp_msgcount ||
3261 		    !fhold(f))
3262 			continue;
3263 		unref[total++] = f;
3264 		KASSERT(total <= unp_unreachable,
3265 		    ("%s: incorrect unreachable count.", __func__));
3266 	}
3267 	UNP_LINK_RUNLOCK();
3268 
3269 	/*
3270 	 * Now flush all sockets, freeing rights.  This will free the
3271 	 * struct files associated with these sockets but leave each socket
3272 	 * with one remaining ref.
3273 	 */
3274 	for (i = 0; i < total; i++) {
3275 		struct socket *so;
3276 
3277 		so = unref[i]->f_data;
3278 		CURVNET_SET(so->so_vnet);
3279 		sorflush(so);
3280 		CURVNET_RESTORE();
3281 	}
3282 
3283 	/*
3284 	 * And finally release the sockets so they can be reclaimed.
3285 	 */
3286 	for (i = 0; i < total; i++)
3287 		fdrop(unref[i], NULL);
3288 	unp_recycled += total;
3289 	free(unref, M_TEMP);
3290 }
3291 
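/*
 * Illustrative sketch (userspace): the kind of garbage unp_gc() above
 * reclaims is a socketpair whose two ends are passed in-flight over
 * each other and then closed.  send_fd() stands for a hypothetical
 * SCM_RIGHTS sendmsg(2) helper, as sketched after unp_internalize().
 *
 *	int sv[2];
 *
 *	socketpair(PF_LOCAL, SOCK_STREAM, 0, sv);
 *	send_fd(sv[0], sv[1]);
 *	send_fd(sv[1], sv[0]);
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * After the close(2) calls each struct file is referenced only by the
 * message queued in the other socket's receive buffer, so f_count equals
 * unp_msgcount for both and the cycle is unreachable from userspace.
 */
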
3292 /*
3293  * Synchronize against unp_gc, which can trip over data as we are freeing it.
3294  */
3295 static void
3296 unp_dispose(struct socket *so)
3297 {
3298 	struct sockbuf *sb;
3299 	struct unpcb *unp;
3300 	struct mbuf *m;
3301 
3302 	MPASS(!SOLISTENING(so));
3303 
3304 	unp = sotounpcb(so);
3305 	UNP_LINK_WLOCK();
3306 	unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS;
3307 	UNP_LINK_WUNLOCK();
3308 
3309 	/*
3310 	 * Grab our special mbufs before calling sbrelease().
3311 	 */
3312 	SOCK_RECVBUF_LOCK(so);
3313 	switch (so->so_type) {
3314 	case SOCK_DGRAM:
3315 		while ((sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) != NULL) {
3316 			STAILQ_CONCAT(&so->so_rcv.uxdg_mb, &sb->uxdg_mb);
3317 			TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist);
3318 			/* Note: socket of sb may reconnect. */
3319 			sb->uxdg_cc = sb->uxdg_ctl = sb->uxdg_mbcnt = 0;
3320 		}
3321 		sb = &so->so_rcv;
3322 		if (sb->uxdg_peeked != NULL) {
3323 			STAILQ_INSERT_HEAD(&sb->uxdg_mb, sb->uxdg_peeked,
3324 			    m_stailqpkt);
3325 			sb->uxdg_peeked = NULL;
3326 		}
3327 		m = STAILQ_FIRST(&sb->uxdg_mb);
3328 		STAILQ_INIT(&sb->uxdg_mb);
3329 		/* XXX: our shortened sbrelease() */
3330 		(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
3331 		    RLIM_INFINITY);
3332 		/*
3333 		 * XXXGL Mark sb with SBS_CANTRCVMORE.  This is needed to
3334 		 * prevent uipc_sosend_dgram() or unp_disconnect() adding more
3335 		 * data to the socket.
3336 		 * We are now in dom_dispose and it could be a call from
3337 		 * soshutdown() or from the final sofree().  The sofree() case
3338 		 * is simple as it guarantees that no more sends will happen,
3339 		 * however we can race with unp_disconnect() from our peer.
3340 		 * The shutdown(2) case is more exotic.  It would call into
3341 		 * dom_dispose() only if the socket is SS_ISCONNECTED.  This
3342 		 * is possible if we did connect(2) on this socket and also
3343 		 * had it bound with bind(2), receiving connections from other
3344 		 * sockets.  Because soshutdown() violates POSIX (see the
3345 		 * comment there) we will end up here shutting down our
3346 		 * receive side.  Of course this will affect not only the
3347 		 * peer we connect(2)ed to, but also all of the peers who had
3348 		 * connect(2)ed to us.  Their sends would end up with ENOBUFS.
3349 		 */
3350 		sb->sb_state |= SBS_CANTRCVMORE;
3351 		break;
3352 	case SOCK_STREAM:
3353 	case SOCK_SEQPACKET:
3354 		sb = &so->so_rcv;
3355 		m = sbcut_locked(sb, sb->sb_ccc);
3356 		KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
3357 		    ("%s: ccc %u mb %p mbcnt %u", __func__,
3358 		    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
3359 		sbrelease_locked(so, SO_RCV);
3360 		break;
3361 	}
3362 	SOCK_RECVBUF_UNLOCK(so);
3363 	if (SOCK_IO_RECV_OWNED(so))
3364 		SOCK_IO_RECV_UNLOCK(so);
3365 
3366 	if (m != NULL) {
3367 		unp_scan(m, unp_freerights);
3368 		m_freem(m);
3369 	}
3370 }
3371 
3372 static void
3373 unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int))
3374 {
3375 	struct mbuf *m;
3376 	struct cmsghdr *cm;
3377 	void *data;
3378 	socklen_t clen, datalen;
3379 
3380 	while (m0 != NULL) {
3381 		for (m = m0; m; m = m->m_next) {
3382 			if (m->m_type != MT_CONTROL)
3383 				continue;
3384 
3385 			cm = mtod(m, struct cmsghdr *);
3386 			clen = m->m_len;
3387 
3388 			while (cm != NULL) {
3389 				if (sizeof(*cm) > clen || cm->cmsg_len > clen)
3390 					break;
3391 
3392 				data = CMSG_DATA(cm);
3393 				datalen = (caddr_t)cm + cm->cmsg_len
3394 				    - (caddr_t)data;
3395 
3396 				if (cm->cmsg_level == SOL_SOCKET &&
3397 				    cm->cmsg_type == SCM_RIGHTS) {
3398 					(*op)(data, datalen /
3399 					    sizeof(struct filedescent *));
3400 				}
3401 
3402 				if (CMSG_SPACE(datalen) < clen) {
3403 					clen -= CMSG_SPACE(datalen);
3404 					cm = (struct cmsghdr *)
3405 					    ((caddr_t)cm + CMSG_SPACE(datalen));
3406 				} else {
3407 					clen = 0;
3408 					cm = NULL;
3409 				}
3410 			}
3411 		}
3412 		m0 = m0->m_nextpkt;
3413 	}
3414 }
3415 
3416 /*
3417  * A helper function called by VFS before socket-type vnode reclamation.
3418  * For an active vnode it clears the unp_vnode pointer and decrements the
3419  * vnode's use count.
3420  */
3421 void
3422 vfs_unp_reclaim(struct vnode *vp)
3423 {
3424 	struct unpcb *unp;
3425 	int active;
3426 	struct mtx *vplock;
3427 
3428 	ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim");
3429 	KASSERT(vp->v_type == VSOCK,
3430 	    ("vfs_unp_reclaim: vp->v_type != VSOCK"));
3431 
3432 	active = 0;
3433 	vplock = mtx_pool_find(mtxpool_sleep, vp);
3434 	mtx_lock(vplock);
3435 	VOP_UNP_CONNECT(vp, &unp);
3436 	if (unp == NULL)
3437 		goto done;
3438 	UNP_PCB_LOCK(unp);
3439 	if (unp->unp_vnode == vp) {
3440 		VOP_UNP_DETACH(vp);
3441 		unp->unp_vnode = NULL;
3442 		active = 1;
3443 	}
3444 	UNP_PCB_UNLOCK(unp);
3445  done:
3446 	mtx_unlock(vplock);
3447 	if (active)
3448 		vunref(vp);
3449 }
3450 
3451 #ifdef DDB
3452 static void
3453 db_print_indent(int indent)
3454 {
3455 	int i;
3456 
3457 	for (i = 0; i < indent; i++)
3458 		db_printf(" ");
3459 }
3460 
3461 static void
3462 db_print_unpflags(int unp_flags)
3463 {
3464 	int comma;
3465 
3466 	comma = 0;
3467 	if (unp_flags & UNP_HAVEPC) {
3468 		db_printf("%sUNP_HAVEPC", comma ? ", " : "");
3469 		comma = 1;
3470 	}
3471 	if (unp_flags & UNP_WANTCRED_ALWAYS) {
3472 		db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : "");
3473 		comma = 1;
3474 	}
3475 	if (unp_flags & UNP_WANTCRED_ONESHOT) {
3476 		db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : "");
3477 		comma = 1;
3478 	}
3479 	if (unp_flags & UNP_CONNWAIT) {
3480 		db_printf("%sUNP_CONNWAIT", comma ? ", " : "");
3481 		comma = 1;
3482 	}
3483 	if (unp_flags & UNP_CONNECTING) {
3484 		db_printf("%sUNP_CONNECTING", comma ? ", " : "");
3485 		comma = 1;
3486 	}
3487 	if (unp_flags & UNP_BINDING) {
3488 		db_printf("%sUNP_BINDING", comma ? ", " : "");
3489 		comma = 1;
3490 	}
3491 }
3492 
3493 static void
3494 db_print_xucred(int indent, struct xucred *xu)
3495 {
3496 	int comma, i;
3497 
3498 	db_print_indent(indent);
3499 	db_printf("cr_version: %u   cr_uid: %u   cr_pid: %d   cr_ngroups: %d\n",
3500 	    xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups);
3501 	db_print_indent(indent);
3502 	db_printf("cr_groups: ");
3503 	comma = 0;
3504 	for (i = 0; i < xu->cr_ngroups; i++) {
3505 		db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
3506 		comma = 1;
3507 	}
3508 	db_printf("\n");
3509 }
3510 
3511 static void
3512 db_print_unprefs(int indent, struct unp_head *uh)
3513 {
3514 	struct unpcb *unp;
3515 	int counter;
3516 
3517 	counter = 0;
3518 	LIST_FOREACH(unp, uh, unp_reflink) {
3519 		if (counter % 4 == 0)
3520 			db_print_indent(indent);
3521 		db_printf("%p  ", unp);
3522 		if (counter % 4 == 3)
3523 			db_printf("\n");
3524 		counter++;
3525 	}
3526 	if (counter != 0 && counter % 4 != 0)
3527 		db_printf("\n");
3528 }
3529 
3530 DB_SHOW_COMMAND(unpcb, db_show_unpcb)
3531 {
3532 	struct unpcb *unp;
3533 
3534 	if (!have_addr) {
3535 		db_printf("usage: show unpcb <addr>\n");
3536 		return;
3537 	}
3538 	unp = (struct unpcb *)addr;
3539 
3540 	db_printf("unp_socket: %p   unp_vnode: %p\n", unp->unp_socket,
3541 	    unp->unp_vnode);
3542 
3543 	db_printf("unp_ino: %ju   unp_conn: %p\n", (uintmax_t)unp->unp_ino,
3544 	    unp->unp_conn);
3545 
3546 	db_printf("unp_refs:\n");
3547 	db_print_unprefs(2, &unp->unp_refs);
3548 
3549 	/* XXXRW: Would be nice to print the full address, if any. */
3550 	db_printf("unp_addr: %p\n", unp->unp_addr);
3551 
3552 	db_printf("unp_gencnt: %llu\n",
3553 	    (unsigned long long)unp->unp_gencnt);
3554 
3555 	db_printf("unp_flags: %x (", unp->unp_flags);
3556 	db_print_unpflags(unp->unp_flags);
3557 	db_printf(")\n");
3558 
3559 	db_printf("unp_peercred:\n");
3560 	db_print_xucred(2, &unp->unp_peercred);
3561 
3562 	db_printf("unp_refcount: %u\n", unp->unp_refcount);
3563 }
3564 #endif
3565