xref: /titanic_52/usr/src/uts/common/fs/sockfs/socksubr.c (revision bb25c06cca41ca78e5fb87fbb8e81d55beb18c95)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/t_lock.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/conf.h>
35 #include <sys/cred.h>
36 #include <sys/kmem.h>
37 #include <sys/sysmacros.h>
38 #include <sys/vfs.h>
39 #include <sys/vnode.h>
40 #include <sys/debug.h>
41 #include <sys/errno.h>
42 #include <sys/time.h>
43 #include <sys/file.h>
44 #include <sys/open.h>
45 #include <sys/user.h>
46 #include <sys/termios.h>
47 #include <sys/stream.h>
48 #include <sys/strsubr.h>
49 #include <sys/strsun.h>
50 #include <sys/esunddi.h>
51 #include <sys/flock.h>
52 #include <sys/modctl.h>
53 #include <sys/cmn_err.h>
54 #include <sys/mkdev.h>
55 #include <sys/pathname.h>
56 #include <sys/ddi.h>
57 #include <sys/stat.h>
58 #include <sys/fs/snode.h>
59 #include <sys/fs/dv_node.h>
60 #include <sys/zone.h>
61 
62 #include <sys/socket.h>
63 #include <sys/socketvar.h>
64 #include <netinet/in.h>
65 #include <sys/un.h>
66 
67 #include <sys/ucred.h>
68 
69 #include <sys/tiuser.h>
70 #define	_SUN_TPI_VERSION	2
71 #include <sys/tihdr.h>
72 
73 #include <c2/audit.h>
74 
75 #include <fs/sockfs/nl7c.h>
76 
77 /*
78  * Macros that operate on struct cmsghdr.
79  * The CMSG_VALID macro does not assume that the last option buffer is padded.
80  */
81 #define	CMSG_CONTENT(cmsg)	(&((cmsg)[1]))
82 #define	CMSG_CONTENTLEN(cmsg)	((cmsg)->cmsg_len - sizeof (struct cmsghdr))
83 #define	CMSG_VALID(cmsg, start, end)					\
84 	(ISALIGNED_cmsghdr(cmsg) &&					\
85 	((uintptr_t)(cmsg) >= (uintptr_t)(start)) &&			\
86 	((uintptr_t)(cmsg) < (uintptr_t)(end)) &&			\
87 	((ssize_t)(cmsg)->cmsg_len >= sizeof (struct cmsghdr)) &&	\
88 	((uintptr_t)(cmsg) + (cmsg)->cmsg_len <= (uintptr_t)(end)))
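
/*
 * A sketch (only) of how these macros are typically used to walk a
 * control buffer, as in so_closefds() and so_getfdopt() below
 * (CMSG_NEXT and ROUNDUP_cmsglen come from the sockfs headers):
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = (struct cmsghdr *)control;
 *	    CMSG_VALID(cmsg, control, (uintptr_t)control + controllen);
 *	    cmsg = CMSG_NEXT(cmsg)) {
 *		level/type are cmsg->cmsg_level and cmsg->cmsg_type;
 *		the value is CMSG_CONTENTLEN(cmsg) bytes at CMSG_CONTENT(cmsg)
 *	}
 */
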
89 #define	SO_LOCK_WAKEUP_TIME	3000	/* Wakeup time in milliseconds */
90 
91 static struct kmem_cache *socktpi_cache, *socktpi_unix_cache;
92 
93 dev_t sockdev;	/* For fsid in getattr */
94 
95 struct sockparams *sphead;
96 krwlock_t splist_lock;
97 
98 struct socklist socklist;
99 
100 static int sockfs_update(kstat_t *, int);
101 static int sockfs_snapshot(kstat_t *, void *, int);
102 
103 extern void sendfile_init();
104 
105 extern void nl7c_init(void);
106 
107 #define	ADRSTRLEN (2 * sizeof (void *) + 1)
108 /*
109  * Kernel structure for passing the sockinfo data back up to the user.
110  * The strings array allows us to convert AF_UNIX addresses into strings
111  * with a common method regardless of which n-bit kernel we're running.
112  */
113 struct k_sockinfo {
114 	struct sockinfo	ks_si;
115 	char		ks_straddr[3][ADRSTRLEN];
116 };
117 
118 /*
119  * Translate from a device pathname (e.g. "/dev/tcp") to a vnode.
120  * Returns with the vnode held.
121  */
122 static int
123 sogetvp(char *devpath, vnode_t **vpp, int uioflag)
124 {
125 	struct snode *csp;
126 	vnode_t *vp, *dvp;
127 	major_t maj;
128 	int error;
129 
130 	ASSERT(uioflag == UIO_SYSSPACE || uioflag == UIO_USERSPACE);
131 	/*
132 	 * Lookup the underlying filesystem vnode.
133 	 */
134 	error = lookupname(devpath, uioflag, FOLLOW, NULLVPP, &vp);
135 	if (error)
136 		return (error);
137 
138 	/* Check that it is the correct vnode */
139 	if (vp->v_type != VCHR) {
140 		VN_RELE(vp);
141 		return (ENOTSOCK);
142 	}
143 
144 	/*
145 	 * If devpath went through devfs, the device should already
146 	 * be configured. If devpath is a mknod file, however, we
147 	 * need to make sure the device is properly configured.
148 	 * To do this, we do something similar to spec_open()
149 	 * except that we resolve to the minor/leaf level since
150 	 * we need to return a vnode.
151 	 */
152 	csp = VTOS(VTOS(vp)->s_commonvp);
153 	if (!(csp->s_flag & SDIPSET)) {
154 		char *pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
155 		error = ddi_dev_pathname(vp->v_rdev, S_IFCHR, pathname);
156 		if (error == 0)
157 			error = devfs_lookupname(pathname, NULLVPP, &dvp);
158 		VN_RELE(vp);
159 		kmem_free(pathname, MAXPATHLEN);
160 		if (error != 0)
161 			return (ENXIO);
162 		vp = dvp;	/* use the devfs vp */
163 	}
164 
165 	/* device is configured at this point */
166 	maj = getmajor(vp->v_rdev);
167 	if (!STREAMSTAB(maj)) {
168 		VN_RELE(vp);
169 		return (ENOSTR);
170 	}
171 
172 	*vpp = vp;
173 	return (0);
174 }
175 
176 /*
177  * Add or delete (the latter if devpath is NULL) an entry in the sockparams
178  * table. If devpathlen is zero the devpath will not be kmem_freed. Otherwise
179  * this routine assumes that the caller has kmem_alloc'ed devpath/devpathlen
180  * for this routine to consume.
181  * The zero devpathlen could be used if the kernel wants to create entries
182  * itself by calling sockconfig(1,2,3, "/dev/tcp", 0);
183  */
184 int
185 soconfig(int domain, int type, int protocol,
186     char *devpath, int devpathlen)
187 {
188 	struct sockparams **spp;
189 	struct sockparams *sp;
190 	int error = 0;
191 
192 	dprint(0, ("soconfig(%d,%d,%d,%s,%d)\n",
193 		domain, type, protocol, devpath, devpathlen));
194 
195 	/*
196 	 * Look for an existing match.
197 	 */
198 	rw_enter(&splist_lock, RW_WRITER);
199 	for (spp = &sphead; (sp = *spp) != NULL; spp = &sp->sp_next) {
200 		if (sp->sp_domain == domain &&
201 		    sp->sp_type == type &&
202 		    sp->sp_protocol == protocol) {
203 			break;
204 		}
205 	}
206 	if (devpath == NULL) {
207 		ASSERT(devpathlen == 0);
208 
209 		/* Delete existing entry */
210 		if (sp == NULL) {
211 			error = ENXIO;
212 			goto done;
213 		}
214 		/* Unlink and free existing entry */
215 		*spp = sp->sp_next;
216 		ASSERT(sp->sp_vnode);
217 		VN_RELE(sp->sp_vnode);
218 		if (sp->sp_devpathlen != 0)
219 			kmem_free(sp->sp_devpath, sp->sp_devpathlen);
220 		kmem_free(sp, sizeof (*sp));
221 	} else {
222 		vnode_t *vp;
223 
224 		/* Add new entry */
225 		if (sp != NULL) {
226 			error = EEXIST;
227 			goto done;
228 		}
229 
230 		error = sogetvp(devpath, &vp, UIO_SYSSPACE);
231 		if (error) {
232 			dprint(0, ("soconfig: vp %s failed with %d\n",
233 				devpath, error));
234 			goto done;
235 		}
236 
237 		dprint(0, ("soconfig: %s => vp %p, dev 0x%lx\n",
238 		    devpath, vp, vp->v_rdev));
239 
240 		sp = kmem_alloc(sizeof (*sp), KM_SLEEP);
241 		sp->sp_domain = domain;
242 		sp->sp_type = type;
243 		sp->sp_protocol = protocol;
244 		sp->sp_devpath = devpath;
245 		sp->sp_devpathlen = devpathlen;
246 		sp->sp_vnode = vp;
247 		sp->sp_next = NULL;
248 		*spp = sp;
249 	}
250 done:
251 	rw_exit(&splist_lock);
252 	if (error) {
253 		if (devpath != NULL)
254 			kmem_free(devpath, devpathlen);
255 #ifdef SOCK_DEBUG
256 		eprintline(error);
257 #endif /* SOCK_DEBUG */
258 	}
259 	return (error);
260 }
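
/*
 * For illustration only: a kernel-internal call that would add a TCP
 * entry looks roughly like the following (normally /sbin/soconfig drives
 * this through the sockconfig system call using /etc/sock2path). The
 * path buffer is handed off to soconfig():
 *
 *	char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
 *
 *	(void) strcpy(path, "/dev/tcp");
 *	error = soconfig(AF_INET, SOCK_STREAM, IPPROTO_TCP,
 *	    path, MAXPATHLEN);
 */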
261 
262 /*
263  * Lookup an entry in the sockparams list based on the triple.
264  * If no entry is found and devpath is not NULL translate devpath to a
265  * vnode. Note that devpath is a pointer to a user address!
266  * Returns with the vnode held.
267  *
268  * When this routine uses devpath it does not create an entry in the sockparams
269  * list since this routine can run on behalf of any user and one user
270  * should not be able to affect the transport used by another user.
271  *
272  * In order to return the correct error this routine has to do wildcard scans
273  * of the list. The errors are (in decreasing precedence):
274  *	EAFNOSUPPORT - address family not in list
275  *	EPROTONOSUPPORT - address family supported but not protocol.
276  *	EPROTOTYPE - address family and protocol supported but not socket type.
277  */
278 vnode_t *
279 solookup(int domain, int type, int protocol, char *devpath, int *errorp)
280 {
281 	struct sockparams *sp;
282 	int error;
283 	vnode_t *vp;
284 
285 	rw_enter(&splist_lock, RW_READER);
286 	for (sp = sphead; sp != NULL; sp = sp->sp_next) {
287 		if (sp->sp_domain == domain &&
288 		    sp->sp_type == type &&
289 		    sp->sp_protocol == protocol) {
290 			break;
291 		}
292 	}
293 	if (sp == NULL) {
294 		dprint(0, ("solookup(%d,%d,%d) not found\n",
295 			domain, type, protocol));
296 		if (devpath == NULL) {
297 			/* Determine correct error code */
298 			int found = 0;
299 
300 			for (sp = sphead; sp != NULL; sp = sp->sp_next) {
301 				if (sp->sp_domain == domain && found < 1)
302 					found = 1;
303 				if (sp->sp_domain == domain &&
304 				    sp->sp_protocol == protocol && found < 2)
305 					found = 2;
306 			}
307 			rw_exit(&splist_lock);
308 			switch (found) {
309 			case 0:
310 				*errorp = EAFNOSUPPORT;
311 				break;
312 			case 1:
313 				*errorp = EPROTONOSUPPORT;
314 				break;
315 			case 2:
316 				*errorp = EPROTOTYPE;
317 				break;
318 			}
319 			return (NULL);
320 		}
321 		rw_exit(&splist_lock);
322 
323 		/*
324 		 * Return vp based on devpath.
325 		 * Do not enter into table to avoid random users
326 		 * modifying the sockparams list.
327 		 */
328 		error = sogetvp(devpath, &vp, UIO_USERSPACE);
329 		if (error) {
330 			dprint(0, ("solookup: vp %p failed with %d\n",
331 				devpath, error));
332 			*errorp = EPROTONOSUPPORT;
333 			return (NULL);
334 		}
335 		dprint(0, ("solookup: %p => vp %p, dev 0x%lx\n",
336 		    devpath, vp, vp->v_rdev));
337 
338 		return (vp);
339 	}
340 	dprint(0, ("solookup(%d,%d,%d) vp %p devpath %s\n",
341 		domain, type, protocol, sp->sp_vnode, sp->sp_devpath));
342 
343 	vp = sp->sp_vnode;
344 	VN_HOLD(vp);
345 	rw_exit(&splist_lock);
346 	return (vp);
347 }
348 
349 /*
350  * Return a socket vnode.
351  *
352  * Assumes that the caller is "passing" an VN_HOLD for accessvp i.e.
353  * when the socket is freed a VN_RELE will take place.
354  *
355  * Note that sockets assume that the driver will clone (either itself
356  * or by using the clone driver), i.e. a socket() call will always
357  * result in a new vnode being created.
358  */
359 struct vnode *
360 makesockvp(struct vnode *accessvp, int domain, int type, int protocol)
361 {
362 	kmem_cache_t *cp;
363 	struct sonode *so;
364 	struct vnode *vp;
365 	time_t now;
366 	dev_t dev;
367 
368 	cp = (domain == AF_UNIX) ? socktpi_unix_cache : socktpi_cache;
369 	so = kmem_cache_alloc(cp, KM_SLEEP);
370 	so->so_cache = cp;
371 	so->so_obj = so;
372 	vp = SOTOV(so);
373 	now = gethrestime_sec();
374 
375 	so->so_flag	= 0;
376 	ASSERT(so->so_accessvp == NULL);
377 	so->so_accessvp	= accessvp;
378 	dev = accessvp->v_rdev;
379 
380 	/*
381 	 * Record in so_flag that it is a clone.
382 	 */
383 	if (getmajor(dev) == clone_major) {
384 		so->so_flag |= SOCLONE;
385 	}
386 	so->so_dev = dev;
387 
388 	so->so_state	= 0;
389 	so->so_mode	= 0;
390 
391 	so->so_fsid	= sockdev;
392 	so->so_atime	= now;
393 	so->so_mtime	= now;
394 	so->so_ctime	= now;		/* Never modified */
395 	so->so_count	= 0;
396 
397 	so->so_family	= (short)domain;
398 	so->so_type	= (short)type;
399 	so->so_protocol	= (short)protocol;
400 	so->so_pushcnt	= 0;
401 
402 	so->so_options	= 0;
403 	so->so_linger.l_onoff	= 0;
404 	so->so_linger.l_linger = 0;
405 	so->so_sndbuf	= 0;
406 	so->so_rcvbuf	= 0;
407 	so->so_sndlowat	= 0;
408 	so->so_rcvlowat	= 0;
409 #ifdef notyet
410 	so->so_sndtimeo	= 0;
411 	so->so_rcvtimeo	= 0;
412 #endif /* notyet */
413 	so->so_error	= 0;
414 	so->so_delayed_error = 0;
415 
416 	ASSERT(so->so_oobmsg == NULL);
417 	so->so_oobcnt	= 0;
418 	so->so_oobsigcnt = 0;
419 	so->so_pgrp	= 0;
420 	so->so_provinfo = NULL;
421 
422 	ASSERT(so->so_laddr_sa == NULL && so->so_faddr_sa == NULL);
423 	so->so_laddr_len = so->so_faddr_len = 0;
424 	so->so_laddr_maxlen = so->so_faddr_maxlen = 0;
425 	so->so_eaddr_mp = NULL;
426 	so->so_priv = NULL;
427 
428 	so->so_peercred = NULL;
429 
430 	ASSERT(so->so_ack_mp == NULL);
431 	ASSERT(so->so_conn_ind_head == NULL);
432 	ASSERT(so->so_conn_ind_tail == NULL);
433 	ASSERT(so->so_ux_bound_vp == NULL);
434 	ASSERT(so->so_unbind_mp == NULL);
435 
436 	vn_reinit(vp);
437 	vp->v_vfsp	= rootvfs;
438 	vp->v_type	= VSOCK;
439 	vp->v_rdev	= so->so_dev;
440 	vn_exists(vp);
441 
442 	return (vp);
443 }
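
/*
 * For illustration, the socket creation path pairs solookup() and
 * makesockvp() roughly as follows (a sketch only, error handling and
 * fallback to an application-supplied devpath omitted):
 *
 *	accessvp = solookup(domain, type, protocol, NULL, &error);
 *	if (accessvp == NULL)
 *		return the error to the caller;
 *	vp = makesockvp(accessvp, domain, type, protocol);
 *	so = VTOSO(vp);
 */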
444 
445 void
446 sockfree(struct sonode *so)
447 {
448 	mblk_t *mp;
449 	vnode_t *vp;
450 
451 	ASSERT(so->so_count == 0);
452 	ASSERT(so->so_accessvp);
453 	ASSERT(so->so_discon_ind_mp == NULL);
454 
455 	vp = so->so_accessvp;
456 	VN_RELE(vp);
457 
458 	/*
459 	 * Protect so->so_[lf]addr_sa so that sockfs_snapshot() can safely
460 	 * dereference them.  It also uses so_accessvp as a validity test.
461 	 */
462 	mutex_enter(&so->so_lock);
463 
464 	so->so_accessvp = NULL;
465 
466 	if (so->so_laddr_sa) {
467 		ASSERT((caddr_t)so->so_faddr_sa ==
468 		    (caddr_t)so->so_laddr_sa + so->so_laddr_maxlen);
469 		ASSERT(so->so_faddr_maxlen == so->so_laddr_maxlen);
470 		so->so_state &= ~(SS_LADDR_VALID | SS_FADDR_VALID);
471 		kmem_free(so->so_laddr_sa, so->so_laddr_maxlen * 2);
472 		so->so_laddr_sa = NULL;
473 		so->so_laddr_len = so->so_laddr_maxlen = 0;
474 		so->so_faddr_sa = NULL;
475 		so->so_faddr_len = so->so_faddr_maxlen = 0;
476 	}
477 
478 	mutex_exit(&so->so_lock);
479 
480 	if ((mp = so->so_eaddr_mp) != NULL) {
481 		freemsg(mp);
482 		so->so_eaddr_mp = NULL;
483 		so->so_delayed_error = 0;
484 	}
485 	if ((mp = so->so_ack_mp) != NULL) {
486 		freemsg(mp);
487 		so->so_ack_mp = NULL;
488 	}
489 	if ((mp = so->so_conn_ind_head) != NULL) {
490 		mblk_t *mp1;
491 
492 		while (mp) {
493 			mp1 = mp->b_next;
494 			mp->b_next = NULL;
495 			freemsg(mp);
496 			mp = mp1;
497 		}
498 		so->so_conn_ind_head = so->so_conn_ind_tail = NULL;
499 		so->so_state &= ~SS_HASCONNIND;
500 	}
501 #ifdef DEBUG
502 	mutex_enter(&so->so_lock);
503 	ASSERT(so_verify_oobstate(so));
504 	mutex_exit(&so->so_lock);
505 #endif /* DEBUG */
506 	if ((mp = so->so_oobmsg) != NULL) {
507 		freemsg(mp);
508 		so->so_oobmsg = NULL;
509 		so->so_state &= ~(SS_OOBPEND|SS_HAVEOOBDATA|SS_HADOOBDATA);
510 	}
511 
512 	if ((mp = so->so_nl7c_rcv_mp) != NULL) {
513 		so->so_nl7c_rcv_mp = NULL;
514 		freemsg(mp);
515 	}
516 	so->so_nl7c_rcv_rval = 0;
517 	if (so->so_nl7c_uri != NULL) {
518 		nl7c_urifree(so);
519 		/* nl7c_urifree() cleared nl7c_uri */
520 	}
521 	if (so->so_nl7c_flags) {
522 		so->so_nl7c_flags = 0;
523 	}
524 
525 	ASSERT(so->so_ux_bound_vp == NULL);
526 	if ((mp = so->so_unbind_mp) != NULL) {
527 		freemsg(mp);
528 		so->so_unbind_mp = NULL;
529 	}
530 	vn_invalid(SOTOV(so));
531 
532 	if (so->so_peercred != NULL)
533 		crfree(so->so_peercred);
534 
535 	kmem_cache_free(so->so_cache, so->so_obj);
536 }
537 
538 /*
539  * Update the accessed, updated, or changed times in an sonode
540  * with the current time.
541  *
542  * Note that neither SunOS 4.X nor 4.4BSD sockets present reasonable
543  * attributes in an fstat call. (They return the current time and 0 for
544  * all timestamps, respectively.) We maintain the current timestamps
545  * here primarily so that should sockmod be popped the resulting
546  * file descriptor will behave like a stream w.r.t. the timestamps.
547  */
548 void
549 so_update_attrs(struct sonode *so, int flag)
550 {
551 	time_t now = gethrestime_sec();
552 
553 	mutex_enter(&so->so_lock);
554 	so->so_flag |= flag;
555 	if (flag & SOACC)
556 		so->so_atime = now;
557 	if (flag & SOMOD)
558 		so->so_mtime = now;
559 	mutex_exit(&so->so_lock);
560 }
561 
562 /*ARGSUSED*/
563 static int
564 socktpi_constructor(void *buf, void *cdrarg, int kmflags)
565 {
566 	struct sonode *so = buf;
567 	struct vnode *vp;
568 
569 	so->so_nl7c_flags	= 0;
570 	so->so_nl7c_uri		= NULL;
571 	so->so_nl7c_rcv_mp	= NULL;
572 
573 	so->so_oobmsg		= NULL;
574 	so->so_ack_mp		= NULL;
575 	so->so_conn_ind_head	= NULL;
576 	so->so_conn_ind_tail	= NULL;
577 	so->so_discon_ind_mp	= NULL;
578 	so->so_ux_bound_vp	= NULL;
579 	so->so_unbind_mp	= NULL;
580 	so->so_accessvp		= NULL;
581 	so->so_laddr_sa		= NULL;
582 	so->so_faddr_sa		= NULL;
583 	so->so_ops		= &sotpi_sonodeops;
584 
585 	vp = vn_alloc(KM_SLEEP);
586 	so->so_vnode = vp;
587 
588 	vn_setops(vp, socktpi_vnodeops);
589 	vp->v_data = (caddr_t)so;
590 
591 	mutex_init(&so->so_lock, NULL, MUTEX_DEFAULT, NULL);
592 	mutex_init(&so->so_plumb_lock, NULL, MUTEX_DEFAULT, NULL);
593 	cv_init(&so->so_state_cv, NULL, CV_DEFAULT, NULL);
594 	cv_init(&so->so_ack_cv, NULL, CV_DEFAULT, NULL);
595 	cv_init(&so->so_connind_cv, NULL, CV_DEFAULT, NULL);
596 	cv_init(&so->so_want_cv, NULL, CV_DEFAULT, NULL);
597 
598 	return (0);
599 }
600 
601 /*ARGSUSED1*/
602 static void
603 socktpi_destructor(void *buf, void *cdrarg)
604 {
605 	struct sonode *so = buf;
606 	struct vnode *vp = SOTOV(so);
607 
608 	ASSERT(so->so_nl7c_flags == 0);
609 	ASSERT(so->so_nl7c_uri == NULL);
610 	ASSERT(so->so_nl7c_rcv_mp == NULL);
611 
612 	ASSERT(so->so_oobmsg == NULL);
613 	ASSERT(so->so_ack_mp == NULL);
614 	ASSERT(so->so_conn_ind_head == NULL);
615 	ASSERT(so->so_conn_ind_tail == NULL);
616 	ASSERT(so->so_discon_ind_mp == NULL);
617 	ASSERT(so->so_ux_bound_vp == NULL);
618 	ASSERT(so->so_unbind_mp == NULL);
619 	ASSERT(so->so_ops == &sotpi_sonodeops);
620 
621 	ASSERT(vn_matchops(vp, socktpi_vnodeops));
622 	ASSERT(vp->v_data == (caddr_t)so);
623 
624 	vn_free(vp);
625 
626 	mutex_destroy(&so->so_lock);
627 	mutex_destroy(&so->so_plumb_lock);
628 	cv_destroy(&so->so_state_cv);
629 	cv_destroy(&so->so_ack_cv);
630 	cv_destroy(&so->so_connind_cv);
631 	cv_destroy(&so->so_want_cv);
632 }
633 
634 static int
635 socktpi_unix_constructor(void *buf, void *cdrarg, int kmflags)
636 {
637 	int retval;
638 
639 	if ((retval = socktpi_constructor(buf, cdrarg, kmflags)) == 0) {
640 		struct sonode *so = (struct sonode *)buf;
641 
642 		mutex_enter(&socklist.sl_lock);
643 
644 		so->so_next = socklist.sl_list;
645 		so->so_prev = NULL;
646 		if (so->so_next != NULL)
647 			so->so_next->so_prev = so;
648 		socklist.sl_list = so;
649 
650 		mutex_exit(&socklist.sl_lock);
651 
652 	}
653 	return (retval);
654 }
655 
656 static void
657 socktpi_unix_destructor(void *buf, void *cdrarg)
658 {
659 	struct sonode	*so	= (struct sonode *)buf;
660 
661 	mutex_enter(&socklist.sl_lock);
662 
663 	if (so->so_next != NULL)
664 		so->so_next->so_prev = so->so_prev;
665 	if (so->so_prev != NULL)
666 		so->so_prev->so_next = so->so_next;
667 	else
668 		socklist.sl_list = so->so_next;
669 
670 	mutex_exit(&socklist.sl_lock);
671 
672 	socktpi_destructor(buf, cdrarg);
673 }
674 
675 /*
676  * Init function called when sockfs is loaded.
677  */
678 int
679 sockinit(int fstype, char *name)
680 {
681 	static const fs_operation_def_t sock_vfsops_template[] = {
682 		NULL, NULL
683 	};
684 	int error;
685 	major_t dev;
686 	char *err_str;
687 
688 	error = vfs_setfsops(fstype, sock_vfsops_template, NULL);
689 	if (error != 0) {
690 		zcmn_err(GLOBAL_ZONEID, CE_WARN,
691 		    "sockinit: bad vfs ops template");
692 		return (error);
693 	}
694 
695 	error = vn_make_ops(name, socktpi_vnodeops_template, &socktpi_vnodeops);
696 	if (error != 0) {
697 		err_str = "sockinit: bad sock vnode ops template";
698 		/* vn_make_ops() does not reset socktpi_vnodeops on failure. */
699 		socktpi_vnodeops = NULL;
700 		goto failure;
701 	}
702 
703 	error = sosctp_init();
704 	if (error != 0) {
705 		err_str = NULL;
706 		goto failure;
707 	}
708 
709 	/*
710 	 * Create sonode caches.  We create a special one for AF_UNIX so
711 	 * that we can track them for netstat(1m).
712 	 */
713 	socktpi_cache = kmem_cache_create("socktpi_cache",
714 	    sizeof (struct sonode), 0, socktpi_constructor,
715 	    socktpi_destructor, NULL, NULL, NULL, 0);
716 
717 	socktpi_unix_cache = kmem_cache_create("socktpi_unix_cache",
718 	    sizeof (struct sonode), 0, socktpi_unix_constructor,
719 	    socktpi_unix_destructor, NULL, NULL, NULL, 0);
720 
721 	/*
722 	 * Build initial list mapping socket parameters to vnode.
723 	 */
724 	rw_init(&splist_lock, NULL, RW_DEFAULT, NULL);
725 
726 	/*
727 	 * If sockets are needed before init runs /sbin/soconfig
728 	 * it is possible to preload the sockparams list here using
729 	 * calls like:
730 	 *	sockconfig(1,2,3, "/dev/tcp", 0);
731 	 */
732 
733 	/*
734 	 * Create a unique dev_t for use in so_fsid.
735 	 */
736 
737 	if ((dev = getudev()) == (major_t)-1)
738 		dev = 0;
739 	sockdev = makedevice(dev, 0);
740 
741 	mutex_init(&socklist.sl_lock, NULL, MUTEX_DEFAULT, NULL);
742 	sendfile_init();
743 	nl7c_init();
744 
745 	return (0);
746 
747 failure:
748 	(void) vfs_freevfsops_by_type(fstype);
749 	if (socktpi_vnodeops != NULL)
750 		vn_freevnodeops(socktpi_vnodeops);
751 	if (err_str != NULL)
752 		zcmn_err(GLOBAL_ZONEID, CE_WARN, err_str);
753 	return (error);
754 }
755 
756 /*
757  * Caller must hold the mutex. Used to set SOLOCKED.
758  */
759 void
760 so_lock_single(struct sonode *so)
761 {
762 	ASSERT(MUTEX_HELD(&so->so_lock));
763 
764 	while (so->so_flag & (SOLOCKED | SOASYNC_UNBIND)) {
765 		so->so_flag |= SOWANT;
766 		cv_wait_stop(&so->so_want_cv, &so->so_lock,
767 			SO_LOCK_WAKEUP_TIME);
768 	}
769 	so->so_flag |= SOLOCKED;
770 }
771 
772 /*
773  * Caller must hold the mutex and pass in SOLOCKED or SOASYNC_UNBIND.
774  * Used to clear SOLOCKED or SOASYNC_UNBIND.
775  */
776 void
777 so_unlock_single(struct sonode *so, int flag)
778 {
779 	ASSERT(MUTEX_HELD(&so->so_lock));
780 	ASSERT(flag & (SOLOCKED|SOASYNC_UNBIND));
781 	ASSERT((flag & ~(SOLOCKED|SOASYNC_UNBIND)) == 0);
782 	ASSERT(so->so_flag & flag);
783 
784 	/*
785 	 * Process the T_DISCON_IND on so_discon_ind_mp.
786 	 *
787 	 * Call to so_drain_discon_ind will result in so_lock
788 	 * being dropped and re-acquired later.
789 	 */
790 	if (so->so_discon_ind_mp != NULL)
791 		so_drain_discon_ind(so);
792 
793 	if (so->so_flag & SOWANT)
794 		cv_broadcast(&so->so_want_cv);
795 	so->so_flag &= ~(SOWANT|flag);
796 }
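
/*
 * For illustration, callers serialize a socket operation with a pattern
 * roughly like this (a sketch only):
 *
 *	mutex_enter(&so->so_lock);
 *	so_lock_single(so);		single-thread the operation
 *	... so_lock may be dropped and re-acquired while SOLOCKED
 *	... remains set, e.g. around waiting for a TPI ack
 *	so_unlock_single(so, SOLOCKED);
 *	mutex_exit(&so->so_lock);
 */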
797 
798 /*
799  * Caller must hold the mutex. Used to set SOREADLOCKED.
800  * If the caller wants nonblocking behavior it should set fmode.
801  */
802 int
803 so_lock_read(struct sonode *so, int fmode)
804 {
805 	ASSERT(MUTEX_HELD(&so->so_lock));
806 
807 	while (so->so_flag & SOREADLOCKED) {
808 		if (fmode & (FNDELAY|FNONBLOCK))
809 			return (EWOULDBLOCK);
810 		so->so_flag |= SOWANT;
811 		cv_wait_stop(&so->so_want_cv, &so->so_lock,
812 			SO_LOCK_WAKEUP_TIME);
813 	}
814 	so->so_flag |= SOREADLOCKED;
815 	return (0);
816 }
817 
818 /*
819  * Like so_lock_read above but allows signals.
820  */
821 int
822 so_lock_read_intr(struct sonode *so, int fmode)
823 {
824 	ASSERT(MUTEX_HELD(&so->so_lock));
825 
826 	while (so->so_flag & SOREADLOCKED) {
827 		if (fmode & (FNDELAY|FNONBLOCK))
828 			return (EWOULDBLOCK);
829 		so->so_flag |= SOWANT;
830 		if (!cv_wait_sig(&so->so_want_cv, &so->so_lock))
831 			return (EINTR);
832 	}
833 	so->so_flag |= SOREADLOCKED;
834 	return (0);
835 }
836 
837 /*
838  * Caller must hold the mutex. Used to clear SOREADLOCKED,
839  * set in so_lock_read() or so_lock_read_intr().
840  */
841 void
842 so_unlock_read(struct sonode *so)
843 {
844 	ASSERT(MUTEX_HELD(&so->so_lock));
845 	ASSERT(so->so_flag & SOREADLOCKED);
846 
847 	if (so->so_flag & SOWANT)
848 		cv_broadcast(&so->so_want_cv);
849 	so->so_flag &= ~(SOWANT|SOREADLOCKED);
850 }
851 
852 /*
853  * Verify that the specified offset and length fall within the mblk and
854  * that the resulting pointer is aligned to align_size.
855  * Returns NULL if not.
856  */
857 void *
858 sogetoff(mblk_t *mp, t_uscalar_t offset,
859     t_uscalar_t length, uint_t align_size)
860 {
861 	uintptr_t ptr1, ptr2;
862 
863 	ASSERT(mp && mp->b_wptr >= mp->b_rptr);
864 	ptr1 = (uintptr_t)mp->b_rptr + offset;
865 	ptr2 = (uintptr_t)ptr1 + length;
866 	if (ptr1 < (uintptr_t)mp->b_rptr || ptr2 > (uintptr_t)mp->b_wptr) {
867 		eprintline(0);
868 		return (NULL);
869 	}
870 	if ((ptr1 & (align_size - 1)) != 0) {
871 		eprintline(0);
872 		return (NULL);
873 	}
874 	return ((void *)ptr1);
875 }
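
/*
 * For example, the source address carried in a T_conn_ind would be
 * located and validated roughly as follows (a sketch only):
 *
 *	struct T_conn_ind *conn_ind = (struct T_conn_ind *)mp->b_rptr;
 *	void *src = sogetoff(mp, conn_ind->SRC_offset,
 *	    conn_ind->SRC_length, 1);
 *
 *	if (src == NULL)
 *		the primitive is malformed and must be discarded;
 */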
876 
877 /*
878  * Return the AF_UNIX underlying filesystem vnode matching a given name.
879  * Makes sure the sending and the destination sonodes are compatible.
880  * The vnode is returned held.
881  *
882  * The underlying filesystem VSOCK vnode has a v_stream pointer that
883  * references the actual stream head (hence indirectly the actual sonode).
884  */
885 static int
886 so_ux_lookup(struct sonode *so, struct sockaddr_un *soun, int checkaccess,
887 		vnode_t **vpp)
888 {
889 	vnode_t		*vp;	/* Underlying filesystem vnode */
890 	vnode_t		*svp;	/* sockfs vnode */
891 	struct sonode	*so2;
892 	int		error;
893 
894 	dprintso(so, 1, ("so_ux_lookup(%p) name <%s>\n",
895 		so, soun->sun_path));
896 
897 	error = lookupname(soun->sun_path, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
898 	if (error) {
899 		eprintsoline(so, error);
900 		return (error);
901 	}
902 	if (vp->v_type != VSOCK) {
903 		error = ENOTSOCK;
904 		eprintsoline(so, error);
905 		goto done2;
906 	}
907 
908 	if (checkaccess) {
909 		/*
910 		 * Check that we have permissions to access the destination
911 		 * vnode. This check is not done in BSD but it is required
912 		 * by X/Open.
913 		 */
914 		if (error = VOP_ACCESS(vp, VREAD|VWRITE, 0, CRED())) {
915 			eprintsoline(so, error);
916 			goto done2;
917 		}
918 	}
919 
920 	/*
921 	 * Check if the remote socket has been closed.
922 	 *
923 	 * Synchronize with vn_rele_stream by holding v_lock while traversing
924 	 * v_stream->sd_vnode.
925 	 */
926 	mutex_enter(&vp->v_lock);
927 	if (vp->v_stream == NULL) {
928 		mutex_exit(&vp->v_lock);
929 		if (so->so_type == SOCK_DGRAM)
930 			error = EDESTADDRREQ;
931 		else
932 			error = ECONNREFUSED;
933 
934 		eprintsoline(so, error);
935 		goto done2;
936 	}
937 	ASSERT(vp->v_stream->sd_vnode);
938 	svp = vp->v_stream->sd_vnode;
939 	/*
940 	 * We are holding v_lock on the underlying filesystem vnode and acquiring
941 	 * it on the sockfs vnode. This assumes that no code ever attempts to
942 	 * acquire these locks in the reverse order.
943 	 */
944 	VN_HOLD(svp);
945 	mutex_exit(&vp->v_lock);
946 
947 	if (svp->v_type != VSOCK) {
948 		error = ENOTSOCK;
949 		eprintsoline(so, error);
950 		goto done;
951 	}
952 
953 	so2 = VTOSO(svp);
954 
955 	if (so->so_type != so2->so_type) {
956 		error = EPROTOTYPE;
957 		eprintsoline(so, error);
958 		goto done;
959 	}
960 
961 	VN_RELE(svp);
962 	*vpp = vp;
963 	return (0);
964 
965 done:
966 	VN_RELE(svp);
967 done2:
968 	VN_RELE(vp);
969 	return (error);
970 }
971 
972 /*
973  * Verify peer address for connect and sendto/sendmsg.
974  * Since sendto/sendmsg would not get synchronous errors from the transport
975  * provider we have to do these ugly checks in the socket layer to
976  * preserve compatibility with SunOS 4.X.
977  */
978 int
979 so_addr_verify(struct sonode *so, const struct sockaddr *name,
980     socklen_t namelen)
981 {
982 	int		family;
983 
984 	dprintso(so, 1, ("so_addr_verify(%p, %p, %d)\n", so, name, namelen));
985 
986 	ASSERT(name != NULL);
987 
988 	family = so->so_family;
989 	switch (family) {
990 	case AF_INET:
991 		if (name->sa_family != family) {
992 			eprintsoline(so, EAFNOSUPPORT);
993 			return (EAFNOSUPPORT);
994 		}
995 		if (namelen != (socklen_t)sizeof (struct sockaddr_in)) {
996 			eprintsoline(so, EINVAL);
997 			return (EINVAL);
998 		}
999 		break;
1000 	case AF_INET6: {
1001 #ifdef DEBUG
1002 		struct sockaddr_in6 *sin6;
1003 #endif /* DEBUG */
1004 
1005 		if (name->sa_family != family) {
1006 			eprintsoline(so, EAFNOSUPPORT);
1007 			return (EAFNOSUPPORT);
1008 		}
1009 		if (namelen != (socklen_t)sizeof (struct sockaddr_in6)) {
1010 			eprintsoline(so, EINVAL);
1011 			return (EINVAL);
1012 		}
1013 #ifdef DEBUG
1014 		/* Verify that apps don't forget to clear sin6_scope_id etc */
1015 		sin6 = (struct sockaddr_in6 *)name;
1016 		if (sin6->sin6_scope_id != 0 &&
1017 		    !IN6_IS_ADDR_LINKSCOPE(&sin6->sin6_addr)) {
1018 			zcmn_err(getzoneid(), CE_WARN,
1019 			    "connect/send* with uninitialized sin6_scope_id "
1020 			    "(%d) on socket. Pid = %d\n",
1021 			    (int)sin6->sin6_scope_id, (int)curproc->p_pid);
1022 		}
1023 #endif /* DEBUG */
1024 		break;
1025 	}
1026 	case AF_UNIX:
1027 		if (so->so_state & SS_FADDR_NOXLATE) {
1028 			return (0);
1029 		}
1030 		if (namelen < (socklen_t)sizeof (short)) {
1031 			eprintsoline(so, ENOENT);
1032 			return (ENOENT);
1033 		}
1034 		if (name->sa_family != family) {
1035 			eprintsoline(so, EAFNOSUPPORT);
1036 			return (EAFNOSUPPORT);
1037 		}
1038 		/* MAXPATHLEN + sun_family + NUL termination */
1039 		if (namelen > (socklen_t)(MAXPATHLEN + sizeof (short) + 1)) {
1040 			eprintsoline(so, ENAMETOOLONG);
1041 			return (ENAMETOOLONG);
1042 		}
1043 
1044 		break;
1045 
1046 	default:
1047 		/*
1048 		 * Default is don't do any length or sa_family check
1049 		 * to allow non-sockaddr style addresses.
1050 		 */
1051 		break;
1052 	}
1053 
1054 	return (0);
1055 }
1056 
1057 
1058 /*
1059  * Translate an AF_UNIX sockaddr_un to the transport internal name.
1060  * Assumes caller has called so_addr_verify first.
1061  */
1062 /*ARGSUSED*/
1063 int
1064 so_ux_addr_xlate(struct sonode *so, struct sockaddr *name,
1065     socklen_t namelen, int checkaccess,
1066     void **addrp, socklen_t *addrlenp)
1067 {
1068 	int			error;
1069 	struct sockaddr_un	*soun;
1070 	vnode_t			*vp;
1071 	void			*addr;
1072 	socklen_t		addrlen;
1073 
1074 	dprintso(so, 1, ("so_ux_addr_xlate(%p, %p, %d, %d)\n",
1075 			so, name, namelen, checkaccess));
1076 
1077 	ASSERT(name != NULL);
1078 	ASSERT(so->so_family == AF_UNIX);
1079 	ASSERT(!(so->so_state & SS_FADDR_NOXLATE));
1080 	ASSERT(namelen >= (socklen_t)sizeof (short));
1081 	ASSERT(name->sa_family == AF_UNIX);
1082 	soun = (struct sockaddr_un *)name;
1083 	/*
1084 	 * Lookup vnode for the specified path name and verify that
1085 	 * it is a socket.
1086 	 */
1087 	error = so_ux_lookup(so, soun, checkaccess, &vp);
1088 	if (error) {
1089 		eprintsoline(so, error);
1090 		return (error);
1091 	}
1092 	/*
1093 	 * Use the address of the peer vnode as the address to send
1094 	 * to. We release the peer vnode here. If it has been
1095 	 * closed by the time the T_CONN_REQ or T_UNITDATA_REQ reaches the
1096 	 * transport, the message will get an error or be dropped.
1097 	 */
1098 	so->so_ux_faddr.soua_vp = vp;
1099 	so->so_ux_faddr.soua_magic = SOU_MAGIC_EXPLICIT;
1100 	addr = &so->so_ux_faddr;
1101 	addrlen = (socklen_t)sizeof (so->so_ux_faddr);
1102 	dprintso(so, 1, ("ux_xlate UNIX: addrlen %d, vp %p\n",
1103 				addrlen, vp));
1104 	VN_RELE(vp);
1105 	*addrp = addr;
1106 	*addrlenp = (socklen_t)addrlen;
1107 	return (0);
1108 }
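
/*
 * For illustration, a connect or sendmsg path uses the translation
 * roughly as follows (a sketch only); addr/addrlen then form the
 * destination portion of the T_conn_req or T_unitdata_req:
 *
 *	error = so_addr_verify(so, name, namelen);
 *	if (error == 0)
 *		error = so_ux_addr_xlate(so, name, namelen, 0,
 *		    &addr, &addrlen);
 */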
1109 
1110 /*
1111  * Esballoc free function for messages that contain SO_FILEP option.
1112  * Decrement the reference count on the file pointers using closef.
1113  */
1114 void
1115 fdbuf_free(struct fdbuf *fdbuf)
1116 {
1117 	int	i;
1118 	struct file *fp;
1119 
1120 	dprint(1, ("fdbuf_free: %d fds\n", fdbuf->fd_numfd));
1121 	for (i = 0; i < fdbuf->fd_numfd; i++) {
1122 		/*
1123 		 * We need pointer-size alignment for fd_fds. On an LP64
1124 		 * kernel, the required alignment is 8 bytes while
1125 		 * the option headers and values are only 4-byte
1126 		 * aligned. So it's safer to do a bcopy than to
1127 		 * assign fdbuf->fd_fds[i] to fp.
1128 		 */
1129 		bcopy((char *)&fdbuf->fd_fds[i], (char *)&fp, sizeof (fp));
1130 		dprint(1, ("fdbuf_free: [%d] = %p\n", i, fp));
1131 		(void) closef(fp);
1132 	}
1133 	if (fdbuf->fd_ebuf != NULL)
1134 		kmem_free(fdbuf->fd_ebuf, fdbuf->fd_ebuflen);
1135 	kmem_free(fdbuf, fdbuf->fd_size);
1136 }
1137 
1138 /*
1139  * Allocate an esballoc'ed message for AF_UNIX file descriptor passing.
1140  * Waits if memory is not available.
1141  */
1142 mblk_t *
1143 fdbuf_allocmsg(int size, struct fdbuf *fdbuf)
1144 {
1145 	uchar_t	*buf;
1146 	mblk_t	*mp;
1147 
1148 	dprint(1, ("fdbuf_allocmsg: size %d, %d fds\n", size, fdbuf->fd_numfd));
1149 	buf = kmem_alloc(size, KM_SLEEP);
1150 	fdbuf->fd_ebuf = (caddr_t)buf;
1151 	fdbuf->fd_ebuflen = size;
1152 	fdbuf->fd_frtn.free_func = fdbuf_free;
1153 	fdbuf->fd_frtn.free_arg = (caddr_t)fdbuf;
1154 
1155 	mp = esballoc_wait(buf, size, BPRI_MED, &fdbuf->fd_frtn);
1156 	mp->b_datap->db_type = M_PROTO;
1157 	return (mp);
1158 }
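
/*
 * For illustration, the send side of SCM_RIGHTS pairs fdbuf_create()
 * (below) with fdbuf_allocmsg() roughly as follows (a sketch only).
 * Once the message is esballoc'ed, freeing it runs fdbuf_free() and
 * drops the file holds:
 *
 *	struct fdbuf *fdbuf;
 *
 *	error = fdbuf_create(fds, fdlen, &fdbuf);
 *	if (error == 0)
 *		mp = fdbuf_allocmsg(size, fdbuf);
 */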
1159 
1160 /*
1161  * Extract file descriptors from a fdbuf.
1162  * Return list in rights/rightslen.
1163  */
1164 /*ARGSUSED*/
1165 static int
1166 fdbuf_extract(struct fdbuf *fdbuf, void *rights, int rightslen)
1167 {
1168 	int	i, fd;
1169 	int	*rp;
1170 	struct file *fp;
1171 	int	numfd;
1172 
1173 	dprint(1, ("fdbuf_extract: %d fds, len %d\n",
1174 		fdbuf->fd_numfd, rightslen));
1175 
1176 	numfd = fdbuf->fd_numfd;
1177 	ASSERT(rightslen == numfd * (int)sizeof (int));
1178 
1179 	/*
1180 	 * Allocate a file descriptor and increment the f_count.
1181 	 * The latter is needed since we always call fdbuf_free
1182 	 * which performs a closef.
1183 	 */
1184 	rp = (int *)rights;
1185 	for (i = 0; i < numfd; i++) {
1186 		if ((fd = ufalloc(0)) == -1)
1187 			goto cleanup;
1188 		/*
1189 		 * We need pointer-size alignment for fd_fds. On an LP64
1190 		 * kernel, the required alignment is 8 bytes while
1191 		 * the option headers and values are only 4-byte
1192 		 * aligned. So it's safer to do a bcopy than to
1193 		 * assign fdbuf->fd_fds[i] to fp.
1194 		 */
1195 		bcopy((char *)&fdbuf->fd_fds[i], (char *)&fp, sizeof (fp));
1196 		mutex_enter(&fp->f_tlock);
1197 		fp->f_count++;
1198 		mutex_exit(&fp->f_tlock);
1199 		setf(fd, fp);
1200 		*rp++ = fd;
1201 #ifdef C2_AUDIT
1202 		if (audit_active)
1203 			audit_fdrecv(fd, fp);
1204 #endif
1205 		dprint(1, ("fdbuf_extract: [%d] = %d, %p refcnt %d\n",
1206 			i, fd, fp, fp->f_count));
1207 	}
1208 	return (0);
1209 
1210 cleanup:
1211 	/*
1212 	 * Undo whatever partial work the loop above has done.
1213 	 */
1214 	{
1215 		int j;
1216 
1217 		rp = (int *)rights;
1218 		for (j = 0; j < i; j++) {
1219 			dprint(0,
1220 			    ("fdbuf_extract: cleanup[%d] = %d\n", j, *rp));
1221 			(void) closeandsetf(*rp++, NULL);
1222 		}
1223 	}
1224 
1225 	return (EMFILE);
1226 }
1227 
1228 /*
1229  * Insert file descriptors into an fdbuf.
1230  * Returns a kmem_alloc'ed fdbuf. The fdbuf should be freed
1231  * by calling fdbuf_free().
1232  */
1233 int
1234 fdbuf_create(void *rights, int rightslen, struct fdbuf **fdbufp)
1235 {
1236 	int		numfd, i;
1237 	int		*fds;
1238 	struct file	*fp;
1239 	struct fdbuf	*fdbuf;
1240 	int		fdbufsize;
1241 
1242 	dprint(1, ("fdbuf_create: len %d\n", rightslen));
1243 
1244 	numfd = rightslen / (int)sizeof (int);
1245 
1246 	fdbufsize = (int)FDBUF_HDRSIZE + (numfd * (int)sizeof (struct file *));
1247 	fdbuf = kmem_alloc(fdbufsize, KM_SLEEP);
1248 	fdbuf->fd_size = fdbufsize;
1249 	fdbuf->fd_numfd = 0;
1250 	fdbuf->fd_ebuf = NULL;
1251 	fdbuf->fd_ebuflen = 0;
1252 	fds = (int *)rights;
1253 	for (i = 0; i < numfd; i++) {
1254 		if ((fp = getf(fds[i])) == NULL) {
1255 			fdbuf_free(fdbuf);
1256 			return (EBADF);
1257 		}
1258 		dprint(1, ("fdbuf_create: [%d] = %d, %p refcnt %d\n",
1259 			i, fds[i], fp, fp->f_count));
1260 		mutex_enter(&fp->f_tlock);
1261 		fp->f_count++;
1262 		mutex_exit(&fp->f_tlock);
1263 		/*
1264 		 * The maximum alignment for fdbuf (or any option header
1265 		 * and its value) is 4 bytes. On an LP64 kernel, that alignment
1266 		 * is not sufficient for pointers (fd_fds in this case). Since
1267 		 * we just did a kmem_alloc (which gives double-word alignment),
1268 		 * we don't need to do anything on the send side (we lose
1269 		 * the double-word alignment because fdbuf goes after an
1270 		 * option header (e.g. T_unitdata_req) which is only 4-byte
1271 		 * aligned). We take care of this when we extract the file
1272 		 * descriptors in fdbuf_extract or fdbuf_free.
1273 		 */
1274 		fdbuf->fd_fds[i] = fp;
1275 		fdbuf->fd_numfd++;
1276 		releasef(fds[i]);
1277 #ifdef C2_AUDIT
1278 		if (audit_active)
1279 			audit_fdsend(fds[i], fp, 0);
1280 #endif
1281 	}
1282 	*fdbufp = fdbuf;
1283 	return (0);
1284 }
1285 
1286 static int
1287 fdbuf_optlen(int rightslen)
1288 {
1289 	int numfd;
1290 
1291 	numfd = rightslen / (int)sizeof (int);
1292 
1293 	return ((int)FDBUF_HDRSIZE + (numfd * (int)sizeof (struct file *)));
1294 }
1295 
1296 static t_uscalar_t
1297 fdbuf_cmsglen(int fdbuflen)
1298 {
1299 	return (t_uscalar_t)((fdbuflen - FDBUF_HDRSIZE) /
1300 	    (int)sizeof (struct file *) * (int)sizeof (int));
1301 }
1302 
1303 
1304 /*
1305  * Return non-zero if the mblk and fdbuf are consistent.
1306  */
1307 static int
1308 fdbuf_verify(mblk_t *mp, struct fdbuf *fdbuf, int fdbuflen)
1309 {
1310 	if (fdbuflen >= FDBUF_HDRSIZE &&
1311 	    fdbuflen == fdbuf->fd_size) {
1312 		frtn_t *frp = mp->b_datap->db_frtnp;
1313 		/*
1314 		 * Check that the SO_FILEP portion of the
1315 		 * message has not been modified by
1316 		 * the loopback transport. The sending sockfs generates
1317 		 * a message that is esballoc'ed with the free function
1318 		 * being fdbuf_free() and where free_arg contains the
1319 		 * identical information as the SO_FILEP content.
1320 		 *
1321 		 * If any of these constraints are not satisfied we
1322 		 * silently ignore the option.
1323 		 */
1324 		ASSERT(mp);
1325 		if (frp != NULL &&
1326 		    frp->free_func == fdbuf_free &&
1327 		    frp->free_arg != NULL &&
1328 		    bcmp(frp->free_arg, fdbuf, fdbuflen) == 0) {
1329 			dprint(1, ("fdbuf_verify: fdbuf %p len %d\n",
1330 				fdbuf, fdbuflen));
1331 			return (1);
1332 		} else {
1333 			zcmn_err(getzoneid(), CE_WARN,
1334 			    "sockfs: mismatched fdbuf content (%p)",
1335 			    (void *)mp);
1336 			return (0);
1337 		}
1338 	} else {
1339 		zcmn_err(getzoneid(), CE_WARN,
1340 		    "sockfs: mismatched fdbuf len %d, %d\n",
1341 		    fdbuflen, fdbuf->fd_size);
1342 		return (0);
1343 	}
1344 }
1345 
1346 /*
1347  * When the file descriptors returned by sorecvmsg cannot be passed
1348  * to the application, this routine will clean up the references on
1349  * the files. Start at startoff bytes into the buffer.
1350  */
1351 static void
1352 close_fds(void *fdbuf, int fdbuflen, int startoff)
1353 {
1354 	int *fds = (int *)fdbuf;
1355 	int numfd = fdbuflen / (int)sizeof (int);
1356 	int i;
1357 
1358 	dprint(1, ("close_fds(%p, %d, %d)\n", fdbuf, fdbuflen, startoff));
1359 
1360 	for (i = 0; i < numfd; i++) {
1361 		if (startoff < 0)
1362 			startoff = 0;
1363 		if (startoff < (int)sizeof (int)) {
1364 			/*
1365 			 * This file descriptor is partially or fully after
1366 			 * the offset
1367 			 */
1368 			dprint(0,
1369 			    ("close_fds: cleanup[%d] = %d\n", i, fds[i]));
1370 			(void) closeandsetf(fds[i], NULL);
1371 		}
1372 		startoff -= (int)sizeof (int);
1373 	}
1374 }
1375 
1376 /*
1377  * Close all file descriptors contained in the control part starting at
1378  * the startoffset.
1379  */
1380 void
1381 so_closefds(void *control, t_uscalar_t controllen, int oldflg,
1382     int startoff)
1383 {
1384 	struct cmsghdr *cmsg;
1385 
1386 	if (control == NULL)
1387 		return;
1388 
1389 	if (oldflg) {
1390 		close_fds(control, controllen, startoff);
1391 		return;
1392 	}
1393 	/* Scan control part for file descriptors. */
1394 	for (cmsg = (struct cmsghdr *)control;
1395 	    CMSG_VALID(cmsg, control, (uintptr_t)control + controllen);
1396 	    cmsg = CMSG_NEXT(cmsg)) {
1397 		if (cmsg->cmsg_level == SOL_SOCKET &&
1398 		    cmsg->cmsg_type == SCM_RIGHTS) {
1399 			close_fds(CMSG_CONTENT(cmsg),
1400 			    (int)CMSG_CONTENTLEN(cmsg),
1401 			    startoff - (int)sizeof (struct cmsghdr));
1402 		}
1403 		startoff -= cmsg->cmsg_len;
1404 	}
1405 }
1406 
1407 /*
1408  * Returns a pointer/length for the file descriptors contained
1409  * in the control buffer. Returns with *fdlenp == -1 if there are no
1410  * file descriptor options present. This is different from there being
1411  * a zero-length file descriptor option.
1412  * Fail if there are multiple SCM_RIGHTS cmsgs.
1413  */
1414 int
1415 so_getfdopt(void *control, t_uscalar_t controllen, int oldflg,
1416     void **fdsp, int *fdlenp)
1417 {
1418 	struct cmsghdr *cmsg;
1419 	void *fds;
1420 	int fdlen;
1421 
1422 	if (control == NULL) {
1423 		*fdsp = NULL;
1424 		*fdlenp = -1;
1425 		return (0);
1426 	}
1427 
1428 	if (oldflg) {
1429 		*fdsp = control;
1430 		if (controllen == 0)
1431 			*fdlenp = -1;
1432 		else
1433 			*fdlenp = controllen;
1434 		dprint(1, ("so_getfdopt: old %d\n", *fdlenp));
1435 		return (0);
1436 	}
1437 
1438 	fds = NULL;
1439 	fdlen = 0;
1440 
1441 	for (cmsg = (struct cmsghdr *)control;
1442 	    CMSG_VALID(cmsg, control, (uintptr_t)control + controllen);
1443 	    cmsg = CMSG_NEXT(cmsg)) {
1444 		if (cmsg->cmsg_level == SOL_SOCKET &&
1445 		    cmsg->cmsg_type == SCM_RIGHTS) {
1446 			if (fds != NULL)
1447 				return (EINVAL);
1448 			fds = CMSG_CONTENT(cmsg);
1449 			fdlen = (int)CMSG_CONTENTLEN(cmsg);
1450 			dprint(1, ("so_getfdopt: new %lu\n",
1451 				(size_t)CMSG_CONTENTLEN(cmsg)));
1452 		}
1453 	}
1454 	if (fds == NULL) {
1455 		dprint(1, ("so_getfdopt: NONE\n"));
1456 		*fdlenp = -1;
1457 	} else
1458 		*fdlenp = fdlen;
1459 	*fdsp = fds;
1460 	return (0);
1461 }
1462 
1463 /*
1464  * Return the length of the options including any file descriptor options.
1465  */
1466 t_uscalar_t
1467 so_optlen(void *control, t_uscalar_t controllen, int oldflg)
1468 {
1469 	struct cmsghdr *cmsg;
1470 	t_uscalar_t optlen = 0;
1471 	t_uscalar_t len;
1472 
1473 	if (control == NULL)
1474 		return (0);
1475 
1476 	if (oldflg)
1477 		return ((t_uscalar_t)(sizeof (struct T_opthdr) +
1478 		    fdbuf_optlen(controllen)));
1479 
1480 	for (cmsg = (struct cmsghdr *)control;
1481 	    CMSG_VALID(cmsg, control, (uintptr_t)control + controllen);
1482 	    cmsg = CMSG_NEXT(cmsg)) {
1483 		if (cmsg->cmsg_level == SOL_SOCKET &&
1484 		    cmsg->cmsg_type == SCM_RIGHTS) {
1485 			len = fdbuf_optlen((int)CMSG_CONTENTLEN(cmsg));
1486 		} else {
1487 			len = (t_uscalar_t)CMSG_CONTENTLEN(cmsg);
1488 		}
1489 		optlen += (t_uscalar_t)(_TPI_ALIGN_TOPT(len) +
1490 		    sizeof (struct T_opthdr));
1491 	}
1492 	dprint(1, ("so_optlen: controllen %d, flg %d -> optlen %d\n",
1493 		controllen, oldflg, optlen));
1494 	return (optlen);
1495 }
1496 
1497 /*
1498  * Copy options from control to the mblk. Skip any file descriptor options.
1499  */
1500 void
1501 so_cmsg2opt(void *control, t_uscalar_t controllen, int oldflg, mblk_t *mp)
1502 {
1503 	struct T_opthdr toh;
1504 	struct cmsghdr *cmsg;
1505 
1506 	if (control == NULL)
1507 		return;
1508 
1509 	if (oldflg) {
1510 		/* No real options - caller has handled file descriptors */
1511 		return;
1512 	}
1513 	for (cmsg = (struct cmsghdr *)control;
1514 	    CMSG_VALID(cmsg, control, (uintptr_t)control + controllen);
1515 	    cmsg = CMSG_NEXT(cmsg)) {
1516 		/*
1517 		 * Note: The caller handles file descriptors prior
1518 		 * to calling this function.
1519 		 */
1520 		t_uscalar_t len;
1521 
1522 		if (cmsg->cmsg_level == SOL_SOCKET &&
1523 		    cmsg->cmsg_type == SCM_RIGHTS)
1524 			continue;
1525 
1526 		len = (t_uscalar_t)CMSG_CONTENTLEN(cmsg);
1527 		toh.level = cmsg->cmsg_level;
1528 		toh.name = cmsg->cmsg_type;
1529 		toh.len = len + (t_uscalar_t)sizeof (struct T_opthdr);
1530 		toh.status = 0;
1531 
1532 		soappendmsg(mp, &toh, sizeof (toh));
1533 		soappendmsg(mp, CMSG_CONTENT(cmsg), len);
1534 		mp->b_wptr += _TPI_ALIGN_TOPT(len) - len;
1535 		ASSERT(mp->b_wptr <= mp->b_datap->db_lim);
1536 	}
1537 }
1538 
1539 /*
1540  * Return the length of the control message derived from the options.
1541  * Exclude SO_SRCADDR and SO_UNIX_CLOSE options. Include SO_FILEP.
1542  * When oldflg is set only include SO_FILEP.
1543  * so_opt2cmsg and so_cmsglen are inter-related since so_cmsglen
1544  * allocates the space that so_opt2cmsg fills. If one changes, the other should
1545  * sizes the space that so_opt2cmsg fills. If one changes, the other should
1546  */
1547 t_uscalar_t
1548 so_cmsglen(mblk_t *mp, void *opt, t_uscalar_t optlen, int oldflg)
1549 {
1550 	t_uscalar_t cmsglen = 0;
1551 	struct T_opthdr *tohp;
1552 	t_uscalar_t len;
1553 	t_uscalar_t last_roundup = 0;
1554 
1555 	ASSERT(__TPI_TOPT_ISALIGNED(opt));
1556 
1557 	for (tohp = (struct T_opthdr *)opt;
1558 	    tohp && _TPI_TOPT_VALID(tohp, opt, (uintptr_t)opt + optlen);
1559 	    tohp = _TPI_TOPT_NEXTHDR(opt, optlen, tohp)) {
1560 		dprint(1, ("so_cmsglen: level 0x%x, name %d, len %d\n",
1561 			tohp->level, tohp->name, tohp->len));
1562 		if (tohp->level == SOL_SOCKET &&
1563 		    (tohp->name == SO_SRCADDR ||
1564 		    tohp->name == SO_UNIX_CLOSE)) {
1565 			continue;
1566 		}
1567 		if (tohp->level == SOL_SOCKET && tohp->name == SO_FILEP) {
1568 			struct fdbuf *fdbuf;
1569 			int fdbuflen;
1570 
1571 			fdbuf = (struct fdbuf *)_TPI_TOPT_DATA(tohp);
1572 			fdbuflen = (int)_TPI_TOPT_DATALEN(tohp);
1573 
1574 			if (!fdbuf_verify(mp, fdbuf, fdbuflen))
1575 				continue;
1576 			if (oldflg) {
1577 				cmsglen += fdbuf_cmsglen(fdbuflen);
1578 				continue;
1579 			}
1580 			len = fdbuf_cmsglen(fdbuflen);
1581 		} else if (tohp->level == SOL_SOCKET &&
1582 		    tohp->name == SCM_TIMESTAMP) {
1583 			if (oldflg)
1584 				continue;
1585 
1586 			if (get_udatamodel() == DATAMODEL_NATIVE) {
1587 				len = sizeof (struct timeval);
1588 			} else {
1589 				len = sizeof (struct timeval32);
1590 			}
1591 		} else {
1592 			if (oldflg)
1593 				continue;
1594 			len = (t_uscalar_t)_TPI_TOPT_DATALEN(tohp);
1595 		}
1596 		/*
1597 		 * Exclude the roundup for the last option so that MSG_CTRUNC
1598 		 * is not set when the cmsg fits but its padding doesn't.
1599 		 */
1600 		last_roundup = (t_uscalar_t)
1601 		    (ROUNDUP_cmsglen(len + (int)sizeof (struct cmsghdr)) -
1602 		    (len + (int)sizeof (struct cmsghdr)));
1603 		cmsglen += (t_uscalar_t)(len + (int)sizeof (struct cmsghdr)) +
1604 		    last_roundup;
1605 	}
1606 	cmsglen -= last_roundup;
1607 	dprint(1, ("so_cmsglen: optlen %d, flg %d -> cmsglen %d\n",
1608 		optlen, oldflg, cmsglen));
1609 	return (cmsglen);
1610 }
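
/*
 * For illustration, the receive side sizes and then fills the control
 * buffer using the pair described above, roughly (a sketch only;
 * "controlp" is a hypothetical destination buffer of that size):
 *
 *	controllen = so_cmsglen(mp, opt, optlen, oldflg);
 *	error = so_opt2cmsg(mp, opt, optlen, oldflg, controlp, controllen);
 */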
1611 
1612 /*
1613  * Copy options from options to the control. Convert SO_FILEP to
1614  * file descriptors.
1615  * Returns errno or zero.
1616  * so_opt2cmsg and so_cmsglen are inter-related since so_cmsglen
1617  * sizes the space that so_opt2cmsg fills. If one changes, the other should
1618  * also be checked for any possible impacts.
1619  */
1620 int
1621 so_opt2cmsg(mblk_t *mp, void *opt, t_uscalar_t optlen, int oldflg,
1622     void *control, t_uscalar_t controllen)
1623 {
1624 	struct T_opthdr *tohp;
1625 	struct cmsghdr *cmsg;
1626 	struct fdbuf *fdbuf;
1627 	int fdbuflen;
1628 	int error;
1629 #if defined(DEBUG) || defined(__lint)
1630 	struct cmsghdr *cend = (struct cmsghdr *)
1631 	    (((uint8_t *)control) + ROUNDUP_cmsglen(controllen));
1632 #endif
1633 	cmsg = (struct cmsghdr *)control;
1634 
1635 	ASSERT(__TPI_TOPT_ISALIGNED(opt));
1636 
1637 	for (tohp = (struct T_opthdr *)opt;
1638 	    tohp && _TPI_TOPT_VALID(tohp, opt, (uintptr_t)opt + optlen);
1639 	    tohp = _TPI_TOPT_NEXTHDR(opt, optlen, tohp)) {
1640 		dprint(1, ("so_opt2cmsg: level 0x%x, name %d, len %d\n",
1641 			tohp->level, tohp->name, tohp->len));
1642 
1643 		if (tohp->level == SOL_SOCKET &&
1644 		    (tohp->name == SO_SRCADDR ||
1645 		    tohp->name == SO_UNIX_CLOSE)) {
1646 			continue;
1647 		}
1648 		ASSERT((uintptr_t)cmsg <= (uintptr_t)control + controllen);
1649 		if (tohp->level == SOL_SOCKET && tohp->name == SO_FILEP) {
1650 			fdbuf = (struct fdbuf *)_TPI_TOPT_DATA(tohp);
1651 			fdbuflen = (int)_TPI_TOPT_DATALEN(tohp);
1652 
1653 			if (!fdbuf_verify(mp, fdbuf, fdbuflen))
1654 				return (EPROTO);
1655 			if (oldflg) {
1656 				error = fdbuf_extract(fdbuf, control,
1657 				    (int)controllen);
1658 				if (error != 0)
1659 					return (error);
1660 				continue;
1661 			} else {
1662 				int fdlen;
1663 
1664 				fdlen = (int)fdbuf_cmsglen(
1665 				    (int)_TPI_TOPT_DATALEN(tohp));
1666 
1667 				cmsg->cmsg_level = tohp->level;
1668 				cmsg->cmsg_type = SCM_RIGHTS;
1669 				cmsg->cmsg_len = (socklen_t)(fdlen +
1670 					sizeof (struct cmsghdr));
1671 
1672 				error = fdbuf_extract(fdbuf,
1673 						CMSG_CONTENT(cmsg), fdlen);
1674 				if (error != 0)
1675 					return (error);
1676 			}
1677 		} else if (tohp->level == SOL_SOCKET &&
1678 		    tohp->name == SCM_TIMESTAMP) {
1679 			timestruc_t *timestamp;
1680 
1681 			if (oldflg)
1682 				continue;
1683 
1684 			cmsg->cmsg_level = tohp->level;
1685 			cmsg->cmsg_type = tohp->name;
1686 
1687 			timestamp =
1688 			    (timestruc_t *)P2ROUNDUP((intptr_t)&tohp[1],
1689 			    sizeof (intptr_t));
1690 
1691 			if (get_udatamodel() == DATAMODEL_NATIVE) {
1692 				struct timeval tv;
1693 
1694 				cmsg->cmsg_len = sizeof (struct timeval) +
1695 				    sizeof (struct cmsghdr);
1696 				tv.tv_sec = timestamp->tv_sec;
1697 				tv.tv_usec = timestamp->tv_nsec /
1698 				    (NANOSEC / MICROSEC);
1699 				/*
1700 				 * On LP64 systems, the struct timeval in
1701 				 * the destination will not be 8-byte aligned,
1702 				 * so use bcopy to avoid alignment trouble.
1703 				 */
1704 				bcopy(&tv, CMSG_CONTENT(cmsg), sizeof (tv));
1705 			} else {
1706 				struct timeval32 *time32;
1707 
1708 				cmsg->cmsg_len = sizeof (struct timeval32) +
1709 				    sizeof (struct cmsghdr);
1710 				time32 = (struct timeval32 *)CMSG_CONTENT(cmsg);
1711 				time32->tv_sec = (time32_t)timestamp->tv_sec;
1712 				time32->tv_usec =
1713 				    (int32_t)(timestamp->tv_nsec /
1714 				    (NANOSEC / MICROSEC));
1715 			}
1716 
1717 		} else {
1718 			if (oldflg)
1719 				continue;
1720 
1721 			cmsg->cmsg_level = tohp->level;
1722 			cmsg->cmsg_type = tohp->name;
1723 			cmsg->cmsg_len = (socklen_t)(_TPI_TOPT_DATALEN(tohp) +
1724 			    sizeof (struct cmsghdr));
1725 
1726 			/* copy content to control data part */
1727 			bcopy(&tohp[1], CMSG_CONTENT(cmsg),
1728 				CMSG_CONTENTLEN(cmsg));
1729 		}
1730 		/* move to next CMSG structure! */
1731 		cmsg = CMSG_NEXT(cmsg);
1732 	}
1733 	dprint(1, ("so_opt2cmsg: buf %p len %d; cend %p; final cmsg %p\n",
1734 	    control, controllen, cend, cmsg));
1735 	ASSERT(cmsg <= cend);
1736 	return (0);
1737 }
1738 
1739 /*
1740  * Extract the SO_SRCADDR option value if present.
1741  */
1742 void
1743 so_getopt_srcaddr(void *opt, t_uscalar_t optlen, void **srcp,
1744     t_uscalar_t *srclenp)
1745 {
1746 	struct T_opthdr		*tohp;
1747 
1748 	ASSERT(__TPI_TOPT_ISALIGNED(opt));
1749 
1750 	ASSERT(srcp != NULL && srclenp != NULL);
1751 	*srcp = NULL;
1752 	*srclenp = 0;
1753 
1754 	for (tohp = (struct T_opthdr *)opt;
1755 	    tohp && _TPI_TOPT_VALID(tohp, opt, (uintptr_t)opt + optlen);
1756 	    tohp = _TPI_TOPT_NEXTHDR(opt, optlen, tohp)) {
1757 		dprint(1, ("so_getopt_srcaddr: level 0x%x, name %d, len %d\n",
1758 			tohp->level, tohp->name, tohp->len));
1759 		if (tohp->level == SOL_SOCKET &&
1760 		    tohp->name == SO_SRCADDR) {
1761 			*srcp = _TPI_TOPT_DATA(tohp);
1762 			*srclenp = (t_uscalar_t)_TPI_TOPT_DATALEN(tohp);
1763 		}
1764 	}
1765 }
1766 
1767 /*
1768  * Verify if the SO_UNIX_CLOSE option is present.
1769  */
1770 int
1771 so_getopt_unix_close(void *opt, t_uscalar_t optlen)
1772 {
1773 	struct T_opthdr		*tohp;
1774 
1775 	ASSERT(__TPI_TOPT_ISALIGNED(opt));
1776 
1777 	for (tohp = (struct T_opthdr *)opt;
1778 	    tohp && _TPI_TOPT_VALID(tohp, opt, (uintptr_t)opt + optlen);
1779 	    tohp = _TPI_TOPT_NEXTHDR(opt, optlen, tohp)) {
1780 		dprint(1,
1781 			("so_getopt_unix_close: level 0x%x, name %d, len %d\n",
1782 			tohp->level, tohp->name, tohp->len));
1783 		if (tohp->level == SOL_SOCKET &&
1784 		    tohp->name == SO_UNIX_CLOSE)
1785 			return (1);
1786 	}
1787 	return (0);
1788 }
1789 
1790 /*
1791  * Allocate an M_PROTO message.
1792  *
1793  * If allocation fails the behavior depends on sleepflg:
1794  *	_ALLOC_NOSLEEP	fail immediately
1795  *	_ALLOC_INTR	sleep for memory until a signal is caught
1796  *	_ALLOC_SLEEP	sleep forever. Don't return NULL.
1797  */
1798 mblk_t *
1799 soallocproto(size_t size, int sleepflg)
1800 {
1801 	mblk_t	*mp;
1802 
1803 	/* Round up size for reuse */
1804 	size = MAX(size, 64);
1805 	mp = allocb(size, BPRI_MED);
1806 	if (mp == NULL) {
1807 		int error;	/* Dummy - error not returned to caller */
1808 
1809 		switch (sleepflg) {
1810 		case _ALLOC_SLEEP:
1811 			mp = allocb_wait(size, BPRI_MED, STR_NOSIG, &error);
1812 			ASSERT(mp);
1813 			break;
1814 		case _ALLOC_INTR:
1815 			mp = allocb_wait(size, BPRI_MED, 0, &error);
1816 			if (mp == NULL) {
1817 				/* Caught signal while sleeping for memory */
1818 				eprintline(ENOBUFS);
1819 				return (NULL);
1820 			}
1821 			break;
1822 		case _ALLOC_NOSLEEP:
1823 		default:
1824 			eprintline(ENOBUFS);
1825 			return (NULL);
1826 		}
1827 	}
1828 	DB_TYPE(mp) = M_PROTO;
1829 	return (mp);
1830 }
1831 
1832 /*
1833  * Allocate an M_PROTO message with a single component.
1834  * len is the length of buf. size is the amount to allocate.
1835  *
1836  * buf can be NULL with a non-zero len.
1837  * This results in a bzero'ed chunk being placed in the message.
1838  */
1839 mblk_t *
1840 soallocproto1(const void *buf, ssize_t len, ssize_t size, int sleepflg)
1841 {
1842 	mblk_t	*mp;
1843 
1844 	if (size == 0)
1845 		size = len;
1846 
1847 	ASSERT(size >= len);
1848 	/* Round up size for reuse */
1849 	size = MAX(size, 64);
1850 	mp = soallocproto(size, sleepflg);
1851 	if (mp == NULL)
1852 		return (NULL);
1853 	mp->b_datap->db_type = M_PROTO;
1854 	if (len != 0) {
1855 		if (buf != NULL)
1856 			bcopy(buf, mp->b_wptr, len);
1857 		else
1858 			bzero(mp->b_wptr, len);
1859 		mp->b_wptr += len;
1860 	}
1861 	return (mp);
1862 }
1863 
1864 /*
1865  * Append buf/len to mp.
1866  * The caller has to ensure that there is enough room in the mblk.
1867  *
1868  * buf can be NULL with a non-zero len.
1869  * This results in a bzero'ed chunk being placed in the message.
1870  */
1871 void
1872 soappendmsg(mblk_t *mp, const void *buf, ssize_t len)
1873 {
1874 	ASSERT(mp);
1875 
1876 	if (len != 0) {
1877 		/* Assert for room left */
1878 		ASSERT(mp->b_datap->db_lim - mp->b_wptr >= len);
1879 		if (buf != NULL)
1880 			bcopy(buf, mp->b_wptr, len);
1881 		else
1882 			bzero(mp->b_wptr, len);
1883 	}
1884 	mp->b_wptr += len;
1885 }
1886 
1887 /*
1888  * Create a message using two kernel buffers.
1889  * If size is set that will determine the allocation size (e.g. for future
1890  * soappendmsg calls). If size is zero it is derived from the buffer
1891  * lengths.
1892  */
1893 mblk_t *
1894 soallocproto2(const void *buf1, ssize_t len1, const void *buf2, ssize_t len2,
1895     ssize_t size, int sleepflg)
1896 {
1897 	mblk_t *mp;
1898 
1899 	if (size == 0)
1900 		size = len1 + len2;
1901 	ASSERT(size >= len1 + len2);
1902 
1903 	mp = soallocproto1(buf1, len1, size, sleepflg);
1904 	if (mp)
1905 		soappendmsg(mp, buf2, len2);
1906 	return (mp);
1907 }
1908 
1909 /*
1910  * Create a message using three kernel buffers.
1911  * If size is set that will determine the allocation size (for future
1912  * soappendmsg calls). If size is zero it is derived from the buffer
1913  * lengths.
1914  */
1915 mblk_t *
1916 soallocproto3(const void *buf1, ssize_t len1, const void *buf2, ssize_t len2,
1917     const void *buf3, ssize_t len3, ssize_t size, int sleepflg)
1918 {
1919 	mblk_t *mp;
1920 
1921 	if (size == 0)
1922 		size = len1 + len2 + len3;
1923 	ASSERT(size >= len1 + len2 + len3);
1924 
1925 	mp = soallocproto1(buf1, len1, size, sleepflg);
1926 	if (mp != NULL) {
1927 		soappendmsg(mp, buf2, len2);
1928 		soappendmsg(mp, buf3, len3);
1929 	}
1930 	return (mp);
1931 }
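
/*
 * For illustration, a T_bind_req carrying an address is typically built
 * with these helpers roughly as follows (a sketch only):
 *
 *	struct T_bind_req bind_req;
 *
 *	bind_req.PRIM_type = T_BIND_REQ;
 *	bind_req.ADDR_length = addrlen;
 *	bind_req.ADDR_offset = (t_scalar_t)sizeof (bind_req);
 *	bind_req.CONIND_number = backlog;
 *	mp = soallocproto2(&bind_req, sizeof (bind_req),
 *	    addr, addrlen, 0, _ALLOC_SLEEP);
 */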
1932 
1933 #ifdef DEBUG
1934 char *
1935 pr_state(uint_t state, uint_t mode)
1936 {
1937 	static char buf[1024];
1938 
1939 	buf[0] = 0;
1940 	if (state & SS_ISCONNECTED)
1941 		strcat(buf, "ISCONNECTED ");
1942 	if (state & SS_ISCONNECTING)
1943 		strcat(buf, "ISCONNECTING ");
1944 	if (state & SS_ISDISCONNECTING)
1945 		strcat(buf, "ISDISCONNECTING ");
1946 	if (state & SS_CANTSENDMORE)
1947 		strcat(buf, "CANTSENDMORE ");
1948 
1949 	if (state & SS_CANTRCVMORE)
1950 		strcat(buf, "CANTRCVMORE ");
1951 	if (state & SS_ISBOUND)
1952 		strcat(buf, "ISBOUND ");
1953 	if (state & SS_NDELAY)
1954 		strcat(buf, "NDELAY ");
1955 	if (state & SS_NONBLOCK)
1956 		strcat(buf, "NONBLOCK ");
1957 
1958 	if (state & SS_ASYNC)
1959 		strcat(buf, "ASYNC ");
1960 	if (state & SS_ACCEPTCONN)
1961 		strcat(buf, "ACCEPTCONN ");
1962 	if (state & SS_HASCONNIND)
1963 		strcat(buf, "HASCONNIND ");
1964 	if (state & SS_SAVEDEOR)
1965 		strcat(buf, "SAVEDEOR ");
1966 
1967 	if (state & SS_RCVATMARK)
1968 		strcat(buf, "RCVATMARK ");
1969 	if (state & SS_OOBPEND)
1970 		strcat(buf, "OOBPEND ");
1971 	if (state & SS_HAVEOOBDATA)
1972 		strcat(buf, "HAVEOOBDATA ");
1973 	if (state & SS_HADOOBDATA)
1974 		strcat(buf, "HADOOBDATA ");
1975 
1976 	if (state & SS_FADDR_NOXLATE)
1977 		strcat(buf, "FADDR_NOXLATE ");
1978 
1979 	if (mode & SM_PRIV)
1980 		strcat(buf, "PRIV ");
1981 	if (mode & SM_ATOMIC)
1982 		strcat(buf, "ATOMIC ");
1983 	if (mode & SM_ADDR)
1984 		strcat(buf, "ADDR ");
1985 	if (mode & SM_CONNREQUIRED)
1986 		strcat(buf, "CONNREQUIRED ");
1987 
1988 	if (mode & SM_FDPASSING)
1989 		strcat(buf, "FDPASSING ");
1990 	if (mode & SM_EXDATA)
1991 		strcat(buf, "EXDATA ");
1992 	if (mode & SM_OPTDATA)
1993 		strcat(buf, "OPTDATA ");
1994 	if (mode & SM_BYTESTREAM)
1995 		strcat(buf, "BYTESTREAM ");
1996 	return (buf);
1997 }
1998 
1999 char *
2000 pr_addr(int family, struct sockaddr *addr, t_uscalar_t addrlen)
2001 {
2002 	static char buf[1024];
2003 
2004 	if (addr == NULL || addrlen == 0) {
2005 		(void) sprintf(buf, "(len %d) %p", addrlen, (void *)addr);
2006 		return (buf);
2007 	}
2008 	switch (family) {
2009 	case AF_INET: {
2010 		struct sockaddr_in sin;
2011 
2012 		bcopy(addr, &sin, sizeof (sin));
2013 
2014 		(void) sprintf(buf, "(len %d) %x/%d",
2015 			addrlen, ntohl(sin.sin_addr.s_addr),
2016 			ntohs(sin.sin_port));
2017 		break;
2018 	}
2019 	case AF_INET6: {
2020 		struct sockaddr_in6 sin6;
2021 		uint16_t *piece = (uint16_t *)&sin6.sin6_addr;
2022 
2023 		bcopy((char *)addr, (char *)&sin6, sizeof (sin6));
2024 		(void) sprintf(buf, "(len %d) %x:%x:%x:%x:%x:%x:%x:%x/%d",
2025 		    addrlen,
2026 		    ntohs(piece[0]), ntohs(piece[1]),
2027 		    ntohs(piece[2]), ntohs(piece[3]),
2028 		    ntohs(piece[4]), ntohs(piece[5]),
2029 		    ntohs(piece[6]), ntohs(piece[7]),
2030 		    ntohs(sin6.sin6_port));
2031 		break;
2032 	}
2033 	case AF_UNIX: {
2034 		struct sockaddr_un *soun = (struct sockaddr_un *)addr;
2035 
2036 		(void) sprintf(buf, "(len %d) %s",
2037 			addrlen,
2038 			(soun == NULL) ? "(none)" : soun->sun_path);
2039 		break;
2040 	}
2041 	default:
2042 		(void) sprintf(buf, "(unknown af %d)", family);
2043 		break;
2044 	}
2045 	return (buf);
2046 }
2047 
2048 /* The logical equivalence operator (a if-and-only-if b) */
2049 #define	EQUIV(a, b)	(((a) && (b)) || (!(a) && (!(b))))
2050 
2051 /*
2052  * Verify limitations and invariants on oob state.
2053  * Return 1 if OK, otherwise 0 so that it can be used as
2054  *	ASSERT(so_verify_oobstate(so));
2055  */
2056 int
2057 so_verify_oobstate(struct sonode *so)
2058 {
2059 	ASSERT(MUTEX_HELD(&so->so_lock));
2060 
2061 	/*
2062 	 * The possible state combinations are:
2063 	 *	0
2064 	 *	SS_OOBPEND
2065 	 *	SS_OOBPEND|SS_HAVEOOBDATA
2066 	 *	SS_OOBPEND|SS_HADOOBDATA
2067 	 *	SS_HADOOBDATA
2068 	 */
2069 	switch (so->so_state & (SS_OOBPEND|SS_HAVEOOBDATA|SS_HADOOBDATA)) {
2070 	case 0:
2071 	case SS_OOBPEND:
2072 	case SS_OOBPEND|SS_HAVEOOBDATA:
2073 	case SS_OOBPEND|SS_HADOOBDATA:
2074 	case SS_HADOOBDATA:
2075 		break;
2076 	default:
2077 		printf("Bad oob state 1 (%p): counts %d/%d state %s\n",
2078 			so, so->so_oobsigcnt,
2079 			so->so_oobcnt, pr_state(so->so_state, so->so_mode));
2080 		return (0);
2081 	}
2082 
2083 	/* SS_RCVATMARK should only be set when SS_OOBPEND is set */
2084 	if ((so->so_state & (SS_RCVATMARK|SS_OOBPEND)) == SS_RCVATMARK) {
2085 		printf("Bad oob state 2 (%p): counts %d/%d state %s\n",
2086 			so, so->so_oobsigcnt,
2087 			so->so_oobcnt, pr_state(so->so_state, so->so_mode));
2088 		return (0);
2089 	}
2090 
2091 	/*
2092 	 * (so_oobsigcnt != 0 or SS_RCVATMARK) iff SS_OOBPEND
2093 	 */
2094 	if (!EQUIV((so->so_oobsigcnt != 0) || (so->so_state & SS_RCVATMARK),
2095 		so->so_state & SS_OOBPEND)) {
2096 		printf("Bad oob state 3 (%p): counts %d/%d state %s\n",
2097 			so, so->so_oobsigcnt,
2098 			so->so_oobcnt, pr_state(so->so_state, so->so_mode));
2099 		return (0);
2100 	}
2101 
2102 	/*
2103 	 * Unless SO_OOBINLINE we have so_oobmsg != NULL iff SS_HAVEOOBDATA
2104 	 */
2105 	if (!(so->so_options & SO_OOBINLINE) &&
2106 	    !EQUIV(so->so_oobmsg != NULL, so->so_state & SS_HAVEOOBDATA)) {
2107 		printf("Bad oob state 4 (%p): counts %d/%d state %s\n",
2108 			so, so->so_oobsigcnt,
2109 			so->so_oobcnt, pr_state(so->so_state, so->so_mode));
2110 		return (0);
2111 	}
2112 	if (so->so_oobsigcnt < so->so_oobcnt) {
2113 		printf("Bad oob state 5 (%p): counts %d/%d state %s\n",
2114 			so, so->so_oobsigcnt,
2115 			so->so_oobcnt, pr_state(so->so_state, so->so_mode));
2116 		return (0);
2117 	}
2118 	return (1);
2119 }
2120 #undef	EQUIV
2121 
2122 #endif /* DEBUG */
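
/*
 * Usage sketch: so_verify_oobstate() exists only under DEBUG and is meant
 * to be wrapped in ASSERT() after an out-of-band state transition, with
 * so_lock held, e.g.:
 *
 *	mutex_enter(&so->so_lock);
 *	so->so_state |= SS_OOBPEND;
 *	so->so_oobsigcnt++;
 *	ASSERT(so_verify_oobstate(so));
 *	mutex_exit(&so->so_lock);
 *
 * In non-DEBUG builds the ASSERT() expands to nothing, so no reference to
 * the function remains.
 */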
2123 
2124 /* initialize the sockfs zone-specific kstat items			*/
2125 void *
2126 sock_kstat_init(zoneid_t zoneid)
2127 {
2128 	kstat_t	*ksp;
2129 
2130 	ksp = kstat_create_zone("sockfs", 0, "sock_unix_list", "misc",
2131 	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VAR_SIZE|KSTAT_FLAG_VIRTUAL, zoneid);
2132 
2133 	if (ksp != NULL) {
2134 		ksp->ks_update = sockfs_update;
2135 		ksp->ks_snapshot = sockfs_snapshot;
2136 		ksp->ks_lock = &socklist.sl_lock;
2137 		ksp->ks_private = (void *)(uintptr_t)zoneid;
2138 		kstat_install(ksp);
2139 	}
2140 
2141 	return (ksp);
2142 }
2143 
2144 /* tear down the sockfs zone-specific kstat items			*/
2145 /*ARGSUSED*/
2146 void
2147 sock_kstat_fini(zoneid_t zoneid, void *arg)
2148 {
2149 	kstat_t *ksp = (kstat_t *)arg;
2150 
2151 	if (ksp != NULL) {
2152 		ASSERT(zoneid == (zoneid_t)(uintptr_t)ksp->ks_private);
2153 		kstat_delete(ksp);
2154 	}
2155 }
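
/*
 * These two routines are intended to be registered as zone create/destroy
 * callbacks so that each zone gets its own "sock_unix_list" kstat; a
 * sketch of such a registration (the key variable name is hypothetical):
 *
 *	static zone_key_t	sock_kstat_key;
 *
 *	zone_key_create(&sock_kstat_key, sock_kstat_init, NULL,
 *	    sock_kstat_fini);
 *
 * The kstat pointer returned by sock_kstat_init() is what the zone
 * framework later passes back to sock_kstat_fini() as 'arg'.
 */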
2156 
2157 /*
2158  * Zones:
2159  * Note that nactive will differ for each zone. This means kstat must
2160  * call sockfs_update and then sockfs_snapshot for the same zone, or the
2161  * snapshot will be taken into a buffer of the wrong size. This is safe,
2162  * but if the buffer is too small the user will not see details of all
2163  * sockets. However, since this kstat has a ks_lock, the kstat framework
2164  * keeps it held between the update and the snapshot, so no other process
2165  * (zone) can get in between and cause the buffer to be allocated with
2166  * the wrong size.
2167  */
2168 static int
2169 sockfs_update(kstat_t *ksp, int rw)
2170 {
2171 	uint_t	nactive = 0;		/* # of active AF_UNIX sockets	*/
2172 	struct sonode	*so;		/* current sonode on socklist	*/
2173 	zoneid_t	myzoneid = (zoneid_t)(uintptr_t)ksp->ks_private;
2174 
2175 	ASSERT((zoneid_t)(uintptr_t)ksp->ks_private == getzoneid());
2176 
2177 	if (rw == KSTAT_WRITE) {	/* bounce all writes		*/
2178 		return (EACCES);
2179 	}
2180 
2181 	for (so = socklist.sl_list; so != NULL; so = so->so_next) {
2182 		if (so->so_accessvp != NULL && so->so_zoneid == myzoneid) {
2183 			nactive++;
2184 		}
2185 	}
2186 	ksp->ks_ndata = nactive;
2187 	ksp->ks_data_size = nactive * sizeof (struct k_sockinfo);
2188 
2189 	return (0);
2190 }
2191 
2192 static int
2193 sockfs_snapshot(kstat_t *ksp, void *buf, int rw)
2194 {
2195 	int			ns;	/* # of sonodes we've copied	*/
2196 	struct sonode		*so;	/* current sonode on socklist	*/
2197 	struct k_sockinfo	*pksi;	/* where we put sockinfo data	*/
2198 	t_uscalar_t		sn_len;	/* soa_len			*/
2199 	zoneid_t		myzoneid = (zoneid_t)(uintptr_t)ksp->ks_private;
2200 
2201 	ASSERT((zoneid_t)(uintptr_t)ksp->ks_private == getzoneid());
2202 
2203 	ksp->ks_snaptime = gethrtime();
2204 
2205 	if (rw == KSTAT_WRITE) {	/* bounce all writes		*/
2206 		return (EACCES);
2207 	}
2208 
2209 	/*
2210 	 * for each sonode on the socklist, we massage the important
2211 	 * info into buf, in k_sockinfo format.
2212 	 */
2213 	pksi = (struct k_sockinfo *)buf;
2214 	for (ns = 0, so = socklist.sl_list; so != NULL; so = so->so_next) {
2215 		/* only copy active sonodes from the same zone:		*/
2216 		if (so->so_accessvp == NULL || so->so_zoneid != myzoneid) {
2217 			continue;
2218 		}
2219 
2220 		/*
2221 		 * If more sonodes were activated between the update and the
2222 		 * snapshot than the buffer can hold, stop: this is only a snapshot.
2223 		 */
2224 		if ((caddr_t)(pksi) >= (caddr_t)buf + ksp->ks_data_size) {
2225 			break;
2226 		}
2227 
2228 		/* copy important info into buf:			*/
2229 		pksi->ks_si.si_size = sizeof (struct k_sockinfo);
2230 		pksi->ks_si.si_family = so->so_family;
2231 		pksi->ks_si.si_type = so->so_type;
2232 		pksi->ks_si.si_flag = so->so_flag;
2233 		pksi->ks_si.si_state = so->so_state;
2234 		pksi->ks_si.si_serv_type = so->so_serv_type;
2235 		pksi->ks_si.si_ux_laddr_sou_magic = so->so_ux_laddr.soua_magic;
2236 		pksi->ks_si.si_ux_faddr_sou_magic = so->so_ux_faddr.soua_magic;
2237 		pksi->ks_si.si_laddr_soa_len = so->so_laddr.soa_len;
2238 		pksi->ks_si.si_faddr_soa_len = so->so_faddr.soa_len;
2239 		pksi->ks_si.si_szoneid = so->so_zoneid;
2240 
2241 		mutex_enter(&so->so_lock);
2242 
2243 		if (so->so_laddr_sa != NULL) {
2244 			ASSERT(so->so_laddr_sa->sa_data != NULL);
2245 			sn_len = so->so_laddr_len;
2246 			ASSERT(sn_len <= sizeof (short) +
2247 			    sizeof (pksi->ks_si.si_laddr_sun_path));
2248 
2249 			pksi->ks_si.si_laddr_family =
2250 				so->so_laddr_sa->sa_family;
2251 			if (sn_len != 0) {
2252 				/* AF_UNIX socket names are NULL terminated */
2253 				(void) strncpy(pksi->ks_si.si_laddr_sun_path,
2254 				    so->so_laddr_sa->sa_data,
2255 				    sizeof (pksi->ks_si.si_laddr_sun_path));
2256 				sn_len = strlen(pksi->ks_si.si_laddr_sun_path);
2257 			}
2258 			pksi->ks_si.si_laddr_sun_path[sn_len] = 0;
2259 		}
2260 
2261 		if (so->so_faddr_sa != NULL) {
2262 			ASSERT(so->so_faddr_sa->sa_data != NULL);
2263 			sn_len = so->so_faddr_len;
2264 			ASSERT(sn_len <= sizeof (short) +
2265 			    sizeof (pksi->ks_si.si_faddr_sun_path));
2266 
2267 			pksi->ks_si.si_faddr_family =
2268 			    so->so_faddr_sa->sa_family;
2269 			if (sn_len != 0) {
2270 				(void) strncpy(pksi->ks_si.si_faddr_sun_path,
2271 				    so->so_faddr_sa->sa_data,
2272 				    sizeof (pksi->ks_si.si_faddr_sun_path));
2273 				sn_len = strlen(pksi->ks_si.si_faddr_sun_path);
2274 			}
2275 			pksi->ks_si.si_faddr_sun_path[sn_len] = 0;
2276 		}
2277 
2278 		mutex_exit(&so->so_lock);
2279 
2280 		(void) sprintf(pksi->ks_straddr[0], "%p", (void *)so);
2281 		(void) sprintf(pksi->ks_straddr[1], "%p",
2282 		    (void *)so->so_ux_laddr.soua_vp);
2283 		(void) sprintf(pksi->ks_straddr[2], "%p",
2284 		    (void *)so->so_ux_faddr.soua_vp);
2285 
2286 		ns++;
2287 		pksi++;
2288 	}
2289 
2290 	ksp->ks_ndata = ns;
2291 	return (0);
2292 }
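
/*
 * Example (userland sketch, using libkstat): reading this kstat is what
 * drives the update/snapshot pair above. Passing a NULL buffer to
 * kstat_read() lets libkstat allocate ks_data using the ks_ndata and
 * ks_data_size values set by sockfs_update(); process_entry() is a
 * hypothetical consumer.
 *
 *	kstat_ctl_t	*kc = kstat_open();
 *	kstat_t		*ksp = kstat_lookup(kc, "sockfs", 0,
 *			    "sock_unix_list");
 *
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *		struct k_sockinfo *si = ksp->ks_data;
 *		uint_t i;
 *
 *		for (i = 0; i < ksp->ks_ndata; i++, si++)
 *			process_entry(si);
 *	}
 *	(void) kstat_close(kc);
 */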
2293 
2294 ssize_t
2295 soreadfile(file_t *fp, uchar_t *buf, u_offset_t fileoff, int *err, size_t size)
2296 {
2297 	struct uio auio;
2298 	struct iovec aiov[MSG_MAXIOVLEN];
2299 	register vnode_t *vp;
2300 	int ioflag, rwflag;
2301 	ssize_t cnt;
2302 	int error = 0;
2303 	int iovcnt = 0;
2304 	short fflag;
2305 
2306 	vp = fp->f_vnode;
2307 	fflag = fp->f_flag;
2308 
2309 	rwflag = 0;
2310 	aiov[0].iov_base = (caddr_t)buf;
2311 	aiov[0].iov_len = size;
2312 	iovcnt = 1;
2313 	cnt = (ssize_t)size;
2314 	(void) VOP_RWLOCK(vp, rwflag, NULL);
2315 
2316 	auio.uio_loffset = fileoff;
2317 	auio.uio_iov = aiov;
2318 	auio.uio_iovcnt = iovcnt;
2319 	auio.uio_resid = cnt;
2320 	auio.uio_segflg = UIO_SYSSPACE;
2321 	auio.uio_llimit = MAXOFFSET_T;
2322 	auio.uio_fmode = fflag;
2323 	auio.uio_extflg = UIO_COPY_CACHED;
2324 
2325 	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
2326 
2327 	/* If read sync is not asked for, filter sync flags */
2328 	if ((ioflag & FRSYNC) == 0)
2329 		ioflag &= ~(FSYNC|FDSYNC);
2330 	error = VOP_READ(vp, &auio, ioflag, fp->f_cred, NULL);
2331 	cnt -= auio.uio_resid;
2332 
2333 	VOP_RWUNLOCK(vp, rwflag, NULL);
2334 
2335 	if (error == EINTR && cnt != 0)
2336 		error = 0;
2337 out:
2338 	if (error != 0) {
2339 		*err = error;
2340 		return (0);
2341 	} else {
2342 		*err = 0;
2343 		return (cnt);
2344 	}
2345 }
2346
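/*
 * Example (sketch): a typical caller reads one chunk of the file into a
 * kernel buffer and distinguishes "no data" from a real error via 'err';
 * 'fp', 'off' and 'chunk' are hypothetical.
 *
 *	uchar_t	*buf = kmem_alloc(chunk, KM_SLEEP);
 *	int	err;
 *	ssize_t	n;
 *
 *	n = soreadfile(fp, buf, off, &err, chunk);
 *	if (n == 0 && err != 0) {
 *		kmem_free(buf, chunk);
 *		return (err);
 *	}
 */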