xref: /titanic_50/usr/src/uts/common/fs/dev/sdev_subr.c (revision bf604c6405d5cbc4e94e3d0ecc9e6e074ed4ea67)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * utility routines for the /dev fs
28  */
29 
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/t_lock.h>
33 #include <sys/systm.h>
34 #include <sys/sysmacros.h>
35 #include <sys/user.h>
36 #include <sys/time.h>
37 #include <sys/vfs.h>
38 #include <sys/vnode.h>
39 #include <sys/file.h>
40 #include <sys/fcntl.h>
41 #include <sys/flock.h>
42 #include <sys/kmem.h>
43 #include <sys/uio.h>
44 #include <sys/errno.h>
45 #include <sys/stat.h>
46 #include <sys/cred.h>
47 #include <sys/dirent.h>
48 #include <sys/pathname.h>
49 #include <sys/cmn_err.h>
50 #include <sys/debug.h>
51 #include <sys/mode.h>
52 #include <sys/policy.h>
53 #include <fs/fs_subr.h>
54 #include <sys/mount.h>
55 #include <sys/fs/snode.h>
56 #include <sys/fs/dv_node.h>
57 #include <sys/fs/sdev_impl.h>
58 #include <sys/fs/sdev_node.h>
59 #include <sys/sunndi.h>
60 #include <sys/sunmdi.h>
61 #include <sys/conf.h>
62 #include <sys/proc.h>
63 #include <sys/user.h>
64 #include <sys/modctl.h>
65 
#ifdef DEBUG
/* debug trace mask consumed by the sdcmn_err*() macros; nonzero enables output */
int sdev_debug = 0x00000001;
/* extra kmem cache debug flags handed to kmem_cache_create() in sdev_node_cache_init() */
int sdev_debug_cache_flags = 0;
#endif
70 
/*
 * globals
 */
/* prototype memory vattrs */

/*
 * Template attributes for /dev directory nodes; only the fields named
 * in va_mask (type, mode, uid, gid) carry meaningful defaults.
 */
vattr_t sdev_vattr_dir = {
	AT_TYPE|AT_MODE|AT_UID|AT_GID,		/* va_mask */
	VDIR,					/* va_type */
	SDEV_DIRMODE_DEFAULT,			/* va_mode */
	SDEV_UID_DEFAULT,			/* va_uid */
	SDEV_GID_DEFAULT,			/* va_gid */
	0,					/* va_fsid */
	0,					/* va_nodeid */
	0,					/* va_nlink */
	0,					/* va_size */
	0,					/* va_atime */
	0,					/* va_mtime */
	0,					/* va_ctime */
	0,					/* va_rdev */
	0,					/* va_blksize */
	0,					/* va_nblocks */
	0					/* va_vcode */
};
93 
/* template attributes for /dev symlink nodes (uid/gid not in va_mask) */
vattr_t sdev_vattr_lnk = {
	AT_TYPE|AT_MODE,			/* va_mask */
	VLNK,					/* va_type */
	SDEV_LNKMODE_DEFAULT,			/* va_mode */
	SDEV_UID_DEFAULT,			/* va_uid */
	SDEV_GID_DEFAULT,			/* va_gid */
	0,					/* va_fsid */
	0,					/* va_nodeid */
	0,					/* va_nlink */
	0,					/* va_size */
	0,					/* va_atime */
	0,					/* va_mtime */
	0,					/* va_ctime */
	0,					/* va_rdev */
	0,					/* va_blksize */
	0,					/* va_nblocks */
	0					/* va_vcode */
};
112 
/* template attributes for /dev block-device nodes */
vattr_t sdev_vattr_blk = {
	AT_TYPE|AT_MODE|AT_UID|AT_GID,		/* va_mask */
	VBLK,					/* va_type */
	S_IFBLK | SDEV_DEVMODE_DEFAULT,		/* va_mode */
	SDEV_UID_DEFAULT,			/* va_uid */
	SDEV_GID_DEFAULT,			/* va_gid */
	0,					/* va_fsid */
	0,					/* va_nodeid */
	0,					/* va_nlink */
	0,					/* va_size */
	0,					/* va_atime */
	0,					/* va_mtime */
	0,					/* va_ctime */
	0,					/* va_rdev */
	0,					/* va_blksize */
	0,					/* va_nblocks */
	0					/* va_vcode */
};
131 
/* template attributes for /dev character-device nodes */
vattr_t sdev_vattr_chr = {
	AT_TYPE|AT_MODE|AT_UID|AT_GID,		/* va_mask */
	VCHR,					/* va_type */
	S_IFCHR | SDEV_DEVMODE_DEFAULT,		/* va_mode */
	SDEV_UID_DEFAULT,			/* va_uid */
	SDEV_GID_DEFAULT,			/* va_gid */
	0,					/* va_fsid */
	0,					/* va_nodeid */
	0,					/* va_nlink */
	0,					/* va_size */
	0,					/* va_atime */
	0,					/* va_mtime */
	0,					/* va_ctime */
	0,					/* va_rdev */
	0,					/* va_blksize */
	0,					/* va_nblocks */
	0					/* va_vcode */
};
150 
kmem_cache_t	*sdev_node_cache;	/* sdev_node cache */
int		devtype;		/* fstype */

struct devname_ops *devname_ns_ops;	/* default name service directory ops */
kmutex_t devname_nsmaps_lock;	/* protect devname_nsmaps */

/* static */
static struct devname_nsmap *devname_nsmaps = NULL;
				/* contents from /etc/dev/devname_master */
static int devname_nsmaps_invalidated = 0; /* "devfsadm -m" has run */

/* forward declarations of file-local helpers defined below */
static struct vnodeops *sdev_get_vop(struct sdev_node *);
static void sdev_set_no_nocache(struct sdev_node *);
static int sdev_get_moduleops(struct sdev_node *);
static fs_operation_def_t *sdev_merge_vtab(const fs_operation_def_t []);
static void sdev_free_vtab(fs_operation_def_t *);
167 
168 static void
169 sdev_prof_free(struct sdev_node *dv)
170 {
171 	ASSERT(!SDEV_IS_GLOBAL(dv));
172 	if (dv->sdev_prof.dev_name)
173 		nvlist_free(dv->sdev_prof.dev_name);
174 	if (dv->sdev_prof.dev_map)
175 		nvlist_free(dv->sdev_prof.dev_map);
176 	if (dv->sdev_prof.dev_symlink)
177 		nvlist_free(dv->sdev_prof.dev_symlink);
178 	if (dv->sdev_prof.dev_glob_incdir)
179 		nvlist_free(dv->sdev_prof.dev_glob_incdir);
180 	if (dv->sdev_prof.dev_glob_excdir)
181 		nvlist_free(dv->sdev_prof.dev_glob_excdir);
182 	bzero(&dv->sdev_prof, sizeof (dv->sdev_prof));
183 }
184 
185 /* sdev_node cache constructor */
186 /*ARGSUSED1*/
187 static int
188 i_sdev_node_ctor(void *buf, void *cfarg, int flag)
189 {
190 	struct sdev_node *dv = (struct sdev_node *)buf;
191 	struct vnode *vp;
192 
193 	bzero(buf, sizeof (struct sdev_node));
194 	vp = dv->sdev_vnode = vn_alloc(flag);
195 	if (vp == NULL) {
196 		return (-1);
197 	}
198 	vp->v_data = dv;
199 	rw_init(&dv->sdev_contents, NULL, RW_DEFAULT, NULL);
200 	return (0);
201 }
202 
203 /* sdev_node cache destructor */
204 /*ARGSUSED1*/
205 static void
206 i_sdev_node_dtor(void *buf, void *arg)
207 {
208 	struct sdev_node *dv = (struct sdev_node *)buf;
209 	struct vnode *vp = SDEVTOV(dv);
210 
211 	rw_destroy(&dv->sdev_contents);
212 	vn_free(vp);
213 }
214 
215 /* initialize sdev_node cache */
216 void
217 sdev_node_cache_init()
218 {
219 	int flags = 0;
220 
221 #ifdef	DEBUG
222 	flags = sdev_debug_cache_flags;
223 	if (flags)
224 		sdcmn_err(("cache debug flags 0x%x\n", flags));
225 #endif	/* DEBUG */
226 
227 	ASSERT(sdev_node_cache == NULL);
228 	sdev_node_cache = kmem_cache_create("sdev_node_cache",
229 	    sizeof (struct sdev_node), 0, i_sdev_node_ctor, i_sdev_node_dtor,
230 	    NULL, NULL, NULL, flags);
231 }
232 
233 /* destroy sdev_node cache */
234 void
235 sdev_node_cache_fini()
236 {
237 	ASSERT(sdev_node_cache != NULL);
238 	kmem_cache_destroy(sdev_node_cache);
239 	sdev_node_cache = NULL;
240 }
241 
242 /*
243  * Compare two nodes lexographically to balance avl tree
244  */
245 static int
246 sdev_compare_nodes(const struct sdev_node *dv1, const struct sdev_node *dv2)
247 {
248 	int rv;
249 	if ((rv = strcmp(dv1->sdev_name, dv2->sdev_name)) == 0)
250 		return (0);
251 	return ((rv < 0) ? -1 : 1);
252 }
253 
254 void
255 sdev_set_nodestate(struct sdev_node *dv, sdev_node_state_t state)
256 {
257 	ASSERT(dv);
258 	ASSERT(RW_WRITE_HELD(&dv->sdev_contents));
259 	dv->sdev_state = state;
260 }
261 
262 static void
263 sdev_attrinit(struct sdev_node *dv, vattr_t *vap)
264 {
265 	timestruc_t now;
266 
267 	ASSERT(vap);
268 
269 	dv->sdev_attr = kmem_zalloc(sizeof (struct vattr), KM_SLEEP);
270 	*dv->sdev_attr = *vap;
271 
272 	dv->sdev_attr->va_mode = MAKEIMODE(vap->va_type, vap->va_mode);
273 
274 	gethrestime(&now);
275 	dv->sdev_attr->va_atime = now;
276 	dv->sdev_attr->va_mtime = now;
277 	dv->sdev_attr->va_ctime = now;
278 }
279 
/*
 * alloc and initialize a sdev_node
 *
 * Allocates a node named 'nm' under parent 'ddv' and returns it through
 * 'newdv' in SDEV_INIT state; sdev_nodeready() later completes the
 * transition to SDEV_READY.  'vap' may be NULL, in which case no
 * in-memory attributes are set up yet.  Returns 0 or ENAMETOOLONG.
 */
int
sdev_nodeinit(struct sdev_node *ddv, char *nm, struct sdev_node **newdv,
    vattr_t *vap)
{
	struct sdev_node *dv = NULL;
	struct vnode *vp;
	size_t nmlen, len;
	devname_handle_t  *dhl;

	/* nmlen counts the terminating NUL */
	nmlen = strlen(nm) + 1;
	if (nmlen > MAXNAMELEN) {
		sdcmn_err9(("sdev_nodeinit: node name %s"
		    " too long\n", nm));
		*newdv = NULL;
		return (ENAMETOOLONG);
	}

	dv = kmem_cache_alloc(sdev_node_cache, KM_SLEEP);

	dv->sdev_name = kmem_alloc(nmlen, KM_SLEEP);
	bcopy(nm, dv->sdev_name, nmlen);
	dv->sdev_namelen = nmlen - 1;	/* '\0' not included */
	/* full path is "<parent path>/<nm>" ('/' + NUL account for the +2) */
	len = strlen(ddv->sdev_path) + strlen(nm) + 2;
	dv->sdev_path = kmem_alloc(len, KM_SLEEP);
	(void) snprintf(dv->sdev_path, len, "%s/%s", ddv->sdev_path, nm);
	/* overwritten for VLNK nodes */
	dv->sdev_symlink = NULL;

	vp = SDEVTOV(dv);
	vn_reinit(vp);
	vp->v_vfsp = SDEVTOV(ddv)->v_vfsp;
	if (vap)
		vp->v_type = vap->va_type;

	/*
	 * initialized to the parent's vnodeops.
	 * maybe overwriten for a VDIR
	 */
	vn_setops(vp, vn_getops(SDEVTOV(ddv)));
	vn_exists(vp);

	dv->sdev_dotdot = NULL;
	dv->sdev_attrvp = NULL;
	if (vap) {
		sdev_attrinit(dv, vap);
	} else {
		dv->sdev_attr = NULL;
	}

	dv->sdev_ino = sdev_mkino(dv);
	dv->sdev_nlink = 0;		/* updated on insert */
	dv->sdev_flags = ddv->sdev_flags; /* inherit from the parent first */
	dv->sdev_flags |= SDEV_BUILD;
	mutex_init(&dv->sdev_lookup_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dv->sdev_lookup_cv, NULL, CV_DEFAULT, NULL);
	if (SDEV_IS_GLOBAL(ddv)) {
		/* global-zone /dev instance: initialize the devname handle */
		dv->sdev_flags |= SDEV_GLOBAL;
		dv->sdev_mapinfo = NULL;
		dhl = &(dv->sdev_handle);
		dhl->dh_data = dv;
		dhl->dh_spec = DEVNAME_NS_NONE;
		dhl->dh_args = NULL;
		sdev_set_no_nocache(dv);
		dv->sdev_gdir_gen = 0;
	} else {
		/* non-global zone instance: profile state instead of handle */
		dv->sdev_flags &= ~SDEV_GLOBAL;
		dv->sdev_origin = NULL; /* set later */
		bzero(&dv->sdev_prof, sizeof (dv->sdev_prof));
		dv->sdev_ldir_gen = 0;
		dv->sdev_devtree_gen = 0;
	}

	rw_enter(&dv->sdev_contents, RW_WRITER);
	sdev_set_nodestate(dv, SDEV_INIT);
	rw_exit(&dv->sdev_contents);
	*newdv = dv;

	return (0);
}
360 
/*
 * transition a sdev_node into SDEV_READY state
 *
 * 'vap' supplies the final type/attributes (va_type required).  For a
 * VLNK node, 'args' is the link-target string; for a non-global node,
 * 'args' is the corresponding global-zone origin sdev_node.  'avp' is
 * an optional pre-existing shadow (backing-store) vnode.  On failure
 * the node is left in SDEV_ZOMBIE state and the error is returned.
 */
int
sdev_nodeready(struct sdev_node *dv, struct vattr *vap, struct vnode *avp,
    void *args, struct cred *cred)
{
	int error = 0;
	struct vnode *vp = SDEVTOV(dv);
	vtype_t type;

	ASSERT(dv && (dv->sdev_state != SDEV_READY) && vap);

	type = vap->va_type;
	vp->v_type = type;
	vp->v_rdev = vap->va_rdev;
	rw_enter(&dv->sdev_contents, RW_WRITER);
	if (type == VDIR) {
		dv->sdev_nlink = 2;	/* the name and "." */
		dv->sdev_flags &= ~SDEV_PERSIST;
		dv->sdev_flags &= ~SDEV_DYNAMIC;
		vn_setops(vp, sdev_get_vop(dv)); /* from internal vtab */
		error = sdev_get_moduleops(dv); /* from plug-in module */
		ASSERT(dv->sdev_dotdot);
		ASSERT(SDEVTOV(dv->sdev_dotdot)->v_type == VDIR);
		/* directories inherit the parent's device */
		vp->v_rdev = SDEVTOV(dv->sdev_dotdot)->v_rdev;
		avl_create(&dv->sdev_entries,
		    (int (*)(const void *, const void *))sdev_compare_nodes,
		    sizeof (struct sdev_node),
		    offsetof(struct sdev_node, sdev_avllink));
	} else if (type == VLNK) {
		ASSERT(args);
		dv->sdev_nlink = 1;
		dv->sdev_symlink = i_ddi_strdup((char *)args, KM_SLEEP);
	} else {
		dv->sdev_nlink = 1;
	}

	if (!(SDEV_IS_GLOBAL(dv))) {
		/* remember the global-zone node this was cloned from */
		dv->sdev_origin = (struct sdev_node *)args;
		dv->sdev_flags &= ~SDEV_PERSIST;
	}

	/*
	 * shadow node is created here OR
	 * if failed (indicated by dv->sdev_attrvp == NULL),
	 * created later in sdev_setattr
	 */
	if (avp) {
		dv->sdev_attrvp = avp;
	} else {
		if (dv->sdev_attr == NULL)
			sdev_attrinit(dv, vap);
		else
			*dv->sdev_attr = *vap;

		if ((SDEV_IS_PERSIST(dv) && (dv->sdev_attrvp == NULL)) ||
		    ((SDEVTOV(dv)->v_type == VDIR) &&
		    (dv->sdev_attrvp == NULL))) {
			error = sdev_shadow_node(dv, cred);
		}
	}

	if (error == 0) {
		/* transition to READY state */
		sdev_set_nodestate(dv, SDEV_READY);
		sdev_nc_node_exists(dv);
	} else {
		sdev_set_nodestate(dv, SDEV_ZOMBIE);
	}
	rw_exit(&dv->sdev_contents);
	return (error);
}
434 
435 /*
436  * setting ZOMBIE state
437  */
438 static int
439 sdev_nodezombied(struct sdev_node *dv)
440 {
441 	rw_enter(&dv->sdev_contents, RW_WRITER);
442 	sdev_set_nodestate(dv, SDEV_ZOMBIE);
443 	rw_exit(&dv->sdev_contents);
444 	return (0);
445 }
446 
/*
 * Build the VROOT sdev_node.
 *
 * Constructs and fully initializes the root directory node of an sdev
 * mount: 'vfsp' is the new vfs, 'devdev' its device, 'avp' the backing
 * (shadow) attribute vnode.  The node is returned in SDEV_READY state.
 * The global /dev instance (mount point "/dev") additionally gets the
 * devname handle and the nsmaps lock initialized.
 */
/*ARGSUSED*/
struct sdev_node *
sdev_mkroot(struct vfs *vfsp, dev_t devdev, struct vnode *mvp,
    struct vnode *avp, struct cred *cred)
{
	struct sdev_node *dv;
	struct vnode *vp;
	char devdir[] = "/dev";

	ASSERT(sdev_node_cache != NULL);
	ASSERT(avp);
	dv = kmem_cache_alloc(sdev_node_cache, KM_SLEEP);
	vp = SDEVTOV(dv);
	vn_reinit(vp);
	vp->v_flag |= VROOT;
	vp->v_vfsp = vfsp;
	vp->v_type = VDIR;
	vp->v_rdev = devdev;
	vn_setops(vp, sdev_vnodeops); /* apply the default vnodeops at /dev */
	vn_exists(vp);

	if (vfsp->vfs_mntpt)
		dv->sdev_name = i_ddi_strdup(
		    (char *)refstr_value(vfsp->vfs_mntpt), KM_SLEEP);
	else
		/* vfs_mountdev1 set mount point later */
		dv->sdev_name = i_ddi_strdup("/dev", KM_SLEEP);
	dv->sdev_namelen = strlen(dv->sdev_name); /* '\0' not included */
	dv->sdev_path = i_ddi_strdup(devdir, KM_SLEEP);
	dv->sdev_ino = SDEV_ROOTINO;
	dv->sdev_nlink = 2;		/* name + . (no sdev_insert) */
	dv->sdev_dotdot = dv;		/* .. == self */
	dv->sdev_attrvp = avp;
	dv->sdev_attr = NULL;
	mutex_init(&dv->sdev_lookup_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dv->sdev_lookup_cv, NULL, CV_DEFAULT, NULL);
	if (strcmp(dv->sdev_name, "/dev") == 0) {
		/* global-zone /dev root: persistent, carries devname state */
		mutex_init(&devname_nsmaps_lock, NULL, MUTEX_DEFAULT, NULL);
		dv->sdev_mapinfo = NULL;
		dv->sdev_flags = SDEV_BUILD|SDEV_GLOBAL|SDEV_PERSIST;
		bzero(&dv->sdev_handle, sizeof (dv->sdev_handle));
		dv->sdev_gdir_gen = 0;
	} else {
		/* non-global zone /dev root: not persisted to backing store */
		dv->sdev_flags = SDEV_BUILD;
		dv->sdev_flags &= ~SDEV_PERSIST;
		bzero(&dv->sdev_prof, sizeof (dv->sdev_prof));
		dv->sdev_ldir_gen = 0;
		dv->sdev_devtree_gen = 0;
	}

	avl_create(&dv->sdev_entries,
	    (int (*)(const void *, const void *))sdev_compare_nodes,
	    sizeof (struct sdev_node),
	    offsetof(struct sdev_node, sdev_avllink));

	rw_enter(&dv->sdev_contents, RW_WRITER);
	sdev_set_nodestate(dv, SDEV_READY);
	rw_exit(&dv->sdev_contents);
	sdev_nc_node_exists(dv);
	return (dv);
}
511 
512 /*
513  *  1. load the module
514  *  2. modload invokes sdev_module_register, which in turn sets
515  *     the dv->sdev_mapinfo->dir_ops
516  *
517  * note: locking order:
518  *	dv->sdev_contents -> map->dir_lock
519  */
520 static int
521 sdev_get_moduleops(struct sdev_node *dv)
522 {
523 	int error = 0;
524 	struct devname_nsmap *map = NULL;
525 	char *module;
526 	char *path;
527 	int load = 1;
528 
529 	ASSERT(SDEVTOV(dv)->v_type == VDIR);
530 
531 	if (devname_nsmaps == NULL)
532 		return (0);
533 
534 	if (!sdev_nsmaps_loaded() && !sdev_nsmaps_reloaded())
535 		return (0);
536 
537 
538 	path = dv->sdev_path;
539 	if ((map = sdev_get_nsmap_by_dir(path, 0))) {
540 		rw_enter(&map->dir_lock, RW_READER);
541 		if (map->dir_invalid) {
542 			if (map->dir_module && map->dir_newmodule &&
543 			    (strcmp(map->dir_module,
544 			    map->dir_newmodule) == 0)) {
545 				load = 0;
546 			}
547 			sdev_replace_nsmap(map, map->dir_newmodule,
548 			    map->dir_newmap);
549 		}
550 
551 		module = map->dir_module;
552 		if (module && load) {
553 			sdcmn_err6(("sdev_get_moduleops: "
554 			    "load module %s", module));
555 			rw_exit(&map->dir_lock);
556 			error = modload("devname", module);
557 			sdcmn_err6(("sdev_get_moduleops: error %d\n", error));
558 			if (error < 0) {
559 				return (-1);
560 			}
561 		} else if (module == NULL) {
562 			/*
563 			 * loading the module ops for name services
564 			 */
565 			if (devname_ns_ops == NULL) {
566 				sdcmn_err6((
567 				    "sdev_get_moduleops: modload default\n"));
568 				error = modload("devname", DEVNAME_NSCONFIG);
569 				sdcmn_err6((
570 				    "sdev_get_moduleops: error %d\n", error));
571 				if (error < 0) {
572 					return (-1);
573 				}
574 			}
575 
576 			if (!rw_tryupgrade(&map->dir_lock)) {
577 				rw_exit(&map->dir_lock);
578 				rw_enter(&map->dir_lock, RW_WRITER);
579 			}
580 			ASSERT(devname_ns_ops);
581 			map->dir_ops = devname_ns_ops;
582 			rw_exit(&map->dir_lock);
583 		}
584 	}
585 
586 	dv->sdev_mapinfo = map;
587 	return (0);
588 }
589 
/*
 * directory dependent vop table
 *
 * One entry per special /dev subdirectory that needs its own vnodeops
 * and/or validation; matched by name in sdev_get_vop() and friends.
 */
struct sdev_vop_table {
	char *vt_name;				/* subdirectory name */
	const fs_operation_def_t *vt_service;	/* vnodeops table */
	struct vnodeops *vt_vops;		/* constructed vop */
	struct vnodeops **vt_global_vops;	/* global container for vop */
	int (*vt_vtor)(struct sdev_node *);	/* validate sdev_node */
	int vt_flags;				/* SDEV_* flags for the dir */
};
599 
/*
 * A nice improvement would be to provide a plug-in mechanism
 * for this table instead of a const table.
 */
/* table of special /dev subdirectories; terminated by the all-NULL entry */
static struct sdev_vop_table vtab[] =
{
	{ "pts", devpts_vnodeops_tbl, NULL, &devpts_vnodeops, devpts_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	{ "vt", devvt_vnodeops_tbl, NULL, &devvt_vnodeops, devvt_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	{ "zcons", NULL, NULL, NULL, NULL, SDEV_NO_NCACHE },

	{ "net", devnet_vnodeops_tbl, NULL, &devnet_vnodeops, devnet_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	{ "ipnet", devipnet_vnodeops_tbl, NULL, &devipnet_vnodeops,
	devipnet_validate, SDEV_DYNAMIC | SDEV_VTOR | SDEV_NO_NCACHE },

	{ NULL, NULL, NULL, NULL, NULL, 0}
};
622 
623 
/*
 *  sets a directory's vnodeops if the directory is in the vtab;
 *  otherwise returns the default sdev_vnodeops, and the child inherits
 *  the parent's SDEV_PERSIST flag.  Also ORs the matched entry's
 *  vt_flags into the node.
 */
static struct vnodeops *
sdev_get_vop(struct sdev_node *dv)
{
	int i;
	char *path;

	path = dv->sdev_path;
	ASSERT(path);

	/* gets the relative path to /dev/ */
	path += 5;

	/* gets the vtab entry if matches */
	for (i = 0; vtab[i].vt_name; i++) {
		if (strcmp(vtab[i].vt_name, path) != 0)
			continue;
		dv->sdev_flags |= vtab[i].vt_flags;

		/* vops already constructed on a prior call: reuse */
		if (vtab[i].vt_vops) {
			if (vtab[i].vt_global_vops)
				*(vtab[i].vt_global_vops) = vtab[i].vt_vops;
			return (vtab[i].vt_vops);
		}

		/* first use: construct vops from the entry's service table */
		if (vtab[i].vt_service) {
			fs_operation_def_t *templ;
			templ = sdev_merge_vtab(vtab[i].vt_service);
			if (vn_make_ops(vtab[i].vt_name,
			    (const fs_operation_def_t *)templ,
			    &vtab[i].vt_vops) != 0) {
				cmn_err(CE_PANIC, "%s: malformed vnode ops\n",
				    vtab[i].vt_name);
				/*NOTREACHED*/
			}
			/* publish the constructed vops in the global slot */
			if (vtab[i].vt_global_vops) {
				*(vtab[i].vt_global_vops) = vtab[i].vt_vops;
			}
			sdev_free_vtab(templ);
			return (vtab[i].vt_vops);
		}
		return (sdev_vnodeops);
	}

	/* child inherits the persistence of the parent */
	if (SDEV_IS_PERSIST(dv->sdev_dotdot))
		dv->sdev_flags |= SDEV_PERSIST;

	return (sdev_vnodeops);
}
676 
677 static void
678 sdev_set_no_nocache(struct sdev_node *dv)
679 {
680 	int i;
681 	char *path;
682 
683 	ASSERT(dv->sdev_path);
684 	path = dv->sdev_path + strlen("/dev/");
685 
686 	for (i = 0; vtab[i].vt_name; i++) {
687 		if (strcmp(vtab[i].vt_name, path) == 0) {
688 			if (vtab[i].vt_flags & SDEV_NO_NCACHE)
689 				dv->sdev_flags |= SDEV_NO_NCACHE;
690 			break;
691 		}
692 	}
693 }
694 
695 void *
696 sdev_get_vtor(struct sdev_node *dv)
697 {
698 	int i;
699 
700 	for (i = 0; vtab[i].vt_name; i++) {
701 		if (strcmp(vtab[i].vt_name, dv->sdev_name) != 0)
702 			continue;
703 		return ((void *)vtab[i].vt_vtor);
704 	}
705 	return (NULL);
706 }
707 
/*
 * Build the base root inode
 *
 * Derives a pseudo inode number from the node's kernel address
 * (shifted to drop alignment bits), offset past SDEV_ROOTINO so it
 * never collides with the root's inode number.
 */
ino_t
sdev_mkino(struct sdev_node *dv)
{
	ino_t	ino;

	/*
	 * for now, follow the lead of tmpfs here
	 * need to someday understand the requirements here
	 */
	ino = (ino_t)(uint32_t)((uintptr_t)dv >> 3);
	ino += SDEV_ROOTINO + 1;

	return (ino);
}
725 
726 static int
727 sdev_getlink(struct vnode *linkvp, char **link)
728 {
729 	int err;
730 	char *buf;
731 	struct uio uio = {0};
732 	struct iovec iov = {0};
733 
734 	if (linkvp == NULL)
735 		return (ENOENT);
736 	ASSERT(linkvp->v_type == VLNK);
737 
738 	buf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
739 	iov.iov_base = buf;
740 	iov.iov_len = MAXPATHLEN;
741 	uio.uio_iov = &iov;
742 	uio.uio_iovcnt = 1;
743 	uio.uio_resid = MAXPATHLEN;
744 	uio.uio_segflg = UIO_SYSSPACE;
745 	uio.uio_llimit = MAXOFFSET_T;
746 
747 	err = VOP_READLINK(linkvp, &uio, kcred, NULL);
748 	if (err) {
749 		cmn_err(CE_WARN, "readlink %s failed in dev\n", buf);
750 		kmem_free(buf, MAXPATHLEN);
751 		return (ENOENT);
752 	}
753 
754 	/* mission complete */
755 	*link = i_ddi_strdup(buf, KM_SLEEP);
756 	kmem_free(buf, MAXPATHLEN);
757 	return (0);
758 }
759 
760 /*
761  * A convenient wrapper to get the devfs node vnode for a device
762  * minor functionality: readlink() of a /dev symlink
763  * Place the link into dv->sdev_symlink
764  */
765 static int
766 sdev_follow_link(struct sdev_node *dv)
767 {
768 	int err;
769 	struct vnode *linkvp;
770 	char *link = NULL;
771 
772 	linkvp = SDEVTOV(dv);
773 	if (linkvp == NULL)
774 		return (ENOENT);
775 	ASSERT(linkvp->v_type == VLNK);
776 	err = sdev_getlink(linkvp, &link);
777 	if (err) {
778 		(void) sdev_nodezombied(dv);
779 		dv->sdev_symlink = NULL;
780 		return (ENOENT);
781 	}
782 
783 	ASSERT(link != NULL);
784 	dv->sdev_symlink = link;
785 	return (0);
786 }
787 
788 static int
789 sdev_node_check(struct sdev_node *dv, struct vattr *nvap, void *nargs)
790 {
791 	vtype_t otype = SDEVTOV(dv)->v_type;
792 
793 	/*
794 	 * existing sdev_node has a different type.
795 	 */
796 	if (otype != nvap->va_type) {
797 		sdcmn_err9(("sdev_node_check: existing node "
798 		    "  %s type %d does not match new node type %d\n",
799 		    dv->sdev_name, otype, nvap->va_type));
800 		return (EEXIST);
801 	}
802 
803 	/*
804 	 * For a symlink, the target should be the same.
805 	 */
806 	if (otype == VLNK) {
807 		ASSERT(nargs != NULL);
808 		ASSERT(dv->sdev_symlink != NULL);
809 		if (strcmp(dv->sdev_symlink, (char *)nargs) != 0) {
810 			sdcmn_err9(("sdev_node_check: existing node "
811 			    " %s has different symlink %s as new node "
812 			    " %s\n", dv->sdev_name, dv->sdev_symlink,
813 			    (char *)nargs));
814 			return (EEXIST);
815 		}
816 	}
817 
818 	return (0);
819 }
820 
/*
 * sdev_mknode - a wrapper for sdev_nodeinit(), sdev_nodeready()
 *
 * arguments:
 *	- ddv (parent)
 *	- nm (child name)
 *	- newdv (sdev_node for nm is returned here)
 *	- vap (vattr for the node to be created, va_type should be set.
 *	- avp (attribute vnode)
 *	  the defaults should be used if unknown)
 *	- cred
 *	- args
 *	    . tnm (for VLNK)
 *	    . global sdev_node (for !SDEV_GLOBAL)
 * 	- state: SDEV_INIT, SDEV_READY
 *
 * only ddv, nm, newddv, vap, cred are required for sdev_mknode(SDEV_INIT)
 *
 * NOTE:  directory contents writers lock needs to be held before
 *	  calling this routine.
 */
int
sdev_mknode(struct sdev_node *ddv, char *nm, struct sdev_node **newdv,
    struct vattr *vap, struct vnode *avp, void *args, struct cred *cred,
    sdev_node_state_t state)
{
	int error = 0;
	sdev_node_state_t node_state;
	struct sdev_node *dv = NULL;

	ASSERT(state != SDEV_ZOMBIE);
	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));

	/* a non-NULL *newdv means the caller already holds the node */
	if (*newdv) {
		dv = *newdv;
	} else {
		/* allocate and initialize a sdev_node */
		if (ddv->sdev_state == SDEV_ZOMBIE) {
			sdcmn_err9(("sdev_mknode: parent %s ZOMBIEd\n",
			    ddv->sdev_path));
			return (ENOENT);
		}

		error = sdev_nodeinit(ddv, nm, &dv, vap);
		if (error != 0) {
			sdcmn_err9(("sdev_mknode: error %d,"
			    " name %s can not be initialized\n",
			    error, nm));
			return (error);
		}
		ASSERT(dv);

		/* insert into the directory cache */
		error = sdev_cache_update(ddv, &dv, nm, SDEV_CACHE_ADD);
		if (error) {
			sdcmn_err9(("sdev_mknode: node %s can not"
			    " be added into directory cache\n", nm));
			return (ENOENT);
		}
	}

	ASSERT(dv);
	node_state = dv->sdev_state;
	ASSERT(node_state != SDEV_ZOMBIE);

	if (state == SDEV_READY) {
		switch (node_state) {
		case SDEV_INIT:
			/* fresh node: complete the READY transition */
			error = sdev_nodeready(dv, vap, avp, args, cred);
			if (error) {
				sdcmn_err9(("sdev_mknode: node %s can NOT"
				    " be transitioned into READY state, "
				    "error %d\n", nm, error));
			}
			break;
		case SDEV_READY:
			/*
			 * Do some sanity checking to make sure
			 * the existing sdev_node is what has been
			 * asked for.
			 */
			error = sdev_node_check(dv, vap, args);
			break;
		default:
			break;
		}
	}

	if (!error) {
		*newdv = dv;
		ASSERT((*newdv)->sdev_state != SDEV_ZOMBIE);
	} else {
		/* drop the hold; the caller gets no node on failure */
		SDEV_SIMPLE_RELE(dv);
		*newdv = NULL;
	}

	return (error);
}
919 
920 /*
921  * convenient wrapper to change vp's ATIME, CTIME and MTIME
922  */
923 void
924 sdev_update_timestamps(struct vnode *vp, cred_t *cred, uint_t mask)
925 {
926 	struct vattr attr;
927 	timestruc_t now;
928 	int err;
929 
930 	ASSERT(vp);
931 	gethrestime(&now);
932 	if (mask & AT_CTIME)
933 		attr.va_ctime = now;
934 	if (mask & AT_MTIME)
935 		attr.va_mtime = now;
936 	if (mask & AT_ATIME)
937 		attr.va_atime = now;
938 
939 	attr.va_mask = (mask & AT_TIMES);
940 	err = VOP_SETATTR(vp, &attr, 0, cred, NULL);
941 	if (err && (err != EROFS)) {
942 		sdcmn_err(("update timestamps error %d\n", err));
943 	}
944 }
945 
/*
 * the backing store vnode is released here
 *
 * Final teardown of an sdev_node with no remaining links: releases the
 * shadow vnode, frees every dynamically allocated field, restores the
 * node to its constructed state and returns it to the kmem cache.
 */
/*ARGSUSED1*/
void
sdev_nodedestroy(struct sdev_node *dv, uint_t flags)
{
	/* no references */
	ASSERT(dv->sdev_nlink == 0);

	if (dv->sdev_attrvp != NULLVP) {
		VN_RELE(dv->sdev_attrvp);
		/*
		 * reset the attrvp so that no more
		 * references can be made on this already
		 * vn_rele() vnode
		 */
		dv->sdev_attrvp = NULLVP;
	}

	if (dv->sdev_attr != NULL) {
		kmem_free(dv->sdev_attr, sizeof (struct vattr));
		dv->sdev_attr = NULL;
	}

	if (dv->sdev_name != NULL) {
		kmem_free(dv->sdev_name, dv->sdev_namelen + 1);
		dv->sdev_name = NULL;
	}

	if (dv->sdev_symlink != NULL) {
		kmem_free(dv->sdev_symlink, strlen(dv->sdev_symlink) + 1);
		dv->sdev_symlink = NULL;
	}

	if (dv->sdev_path) {
		kmem_free(dv->sdev_path, strlen(dv->sdev_path) + 1);
		dv->sdev_path = NULL;
	}

	if (!SDEV_IS_GLOBAL(dv))
		sdev_prof_free(dv);

	if (SDEVTOV(dv)->v_type == VDIR) {
		/* directory must already be empty */
		ASSERT(SDEV_FIRST_ENTRY(dv) == NULL);
		avl_destroy(&dv->sdev_entries);
	}

	mutex_destroy(&dv->sdev_lookup_lock);
	cv_destroy(&dv->sdev_lookup_cv);

	/* return node to initial state as per constructor */
	(void) memset((void *)&dv->sdev_instance_data, 0,
	    sizeof (dv->sdev_instance_data));
	vn_invalid(SDEVTOV(dv));
	kmem_cache_free(sdev_node_cache, dv);
}
1003 
1004 /*
1005  * DIRECTORY CACHE lookup
1006  */
1007 struct sdev_node *
1008 sdev_findbyname(struct sdev_node *ddv, char *nm)
1009 {
1010 	struct sdev_node *dv;
1011 	struct sdev_node dvtmp;
1012 	avl_index_t	where;
1013 
1014 	ASSERT(RW_LOCK_HELD(&ddv->sdev_contents));
1015 
1016 	dvtmp.sdev_name = nm;
1017 	dv = avl_find(&ddv->sdev_entries, &dvtmp, &where);
1018 	if (dv) {
1019 		ASSERT(dv->sdev_dotdot == ddv);
1020 		ASSERT(strcmp(dv->sdev_name, nm) == 0);
1021 		SDEV_HOLD(dv);
1022 		return (dv);
1023 	}
1024 	return (NULL);
1025 }
1026 
1027 /*
1028  * Inserts a new sdev_node in a parent directory
1029  */
1030 void
1031 sdev_direnter(struct sdev_node *ddv, struct sdev_node *dv)
1032 {
1033 	avl_index_t where;
1034 
1035 	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));
1036 	ASSERT(SDEVTOV(ddv)->v_type == VDIR);
1037 	ASSERT(ddv->sdev_nlink >= 2);
1038 	ASSERT(dv->sdev_nlink == 0);
1039 
1040 	dv->sdev_dotdot = ddv;
1041 	VERIFY(avl_find(&ddv->sdev_entries, dv, &where) == NULL);
1042 	avl_insert(&ddv->sdev_entries, dv, where);
1043 	ddv->sdev_nlink++;
1044 }
1045 
1046 /*
1047  * The following check is needed because while sdev_nodes are linked
1048  * in SDEV_INIT state, they have their link counts incremented only
1049  * in SDEV_READY state.
1050  */
1051 static void
1052 decr_link(struct sdev_node *dv)
1053 {
1054 	if (dv->sdev_state != SDEV_INIT)
1055 		dv->sdev_nlink--;
1056 	else
1057 		ASSERT(dv->sdev_nlink == 0);
1058 }
1059 
/*
 * Delete an existing dv from directory cache
 *
 * In the case of a node is still held by non-zero reference count,
 *     the node is put into ZOMBIE state. Once the reference count
 *     reaches "0", the node is unlinked and destroyed,
 *     in sdev_inactive().
 */
static int
sdev_dirdelete(struct sdev_node *ddv, struct sdev_node *dv)
{
	struct vnode *vp;

	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));

	vp = SDEVTOV(dv);
	mutex_enter(&vp->v_lock);

	/* dv is held still */
	if (vp->v_count > 1) {
		/* other holders remain: zombie the node, drop our hold */
		rw_enter(&dv->sdev_contents, RW_WRITER);
		if (dv->sdev_state == SDEV_READY) {
			sdcmn_err9((
			    "sdev_delete: node %s busy with count %d\n",
			    dv->sdev_name, vp->v_count));
			dv->sdev_state = SDEV_ZOMBIE;
		}
		rw_exit(&dv->sdev_contents);
		--vp->v_count;
		mutex_exit(&vp->v_lock);
		return (EBUSY);
	}
	ASSERT(vp->v_count == 1);

	/* unlink from the memory cache */
	ddv->sdev_nlink--;	/* .. to above */
	if (vp->v_type == VDIR) {
		decr_link(dv);		/* . to self */
	}

	avl_remove(&ddv->sdev_entries, dv);
	decr_link(dv);	/* name, back to zero */
	/* drop the final vnode hold before freeing the node */
	vp->v_count--;
	mutex_exit(&vp->v_lock);

	/* destroy the node */
	sdev_nodedestroy(dv, 0);
	return (0);
}
1109 
1110 /*
1111  * check if the source is in the path of the target
1112  *
1113  * source and target are different
1114  */
1115 /*ARGSUSED2*/
1116 static int
1117 sdev_checkpath(struct sdev_node *sdv, struct sdev_node *tdv, struct cred *cred)
1118 {
1119 	int error = 0;
1120 	struct sdev_node *dotdot, *dir;
1121 
1122 	dotdot = tdv->sdev_dotdot;
1123 	ASSERT(dotdot);
1124 
1125 	/* fs root */
1126 	if (dotdot == tdv) {
1127 		return (0);
1128 	}
1129 
1130 	for (;;) {
1131 		/*
1132 		 * avoid error cases like
1133 		 *	mv a a/b
1134 		 *	mv a a/b/c
1135 		 *	etc.
1136 		 */
1137 		if (dotdot == sdv) {
1138 			error = EINVAL;
1139 			break;
1140 		}
1141 
1142 		dir = dotdot;
1143 		dotdot = dir->sdev_dotdot;
1144 
1145 		/* done checking because root is reached */
1146 		if (dir == dotdot) {
1147 			break;
1148 		}
1149 	}
1150 	return (error);
1151 }
1152 
/*
 * Rename sdev_node odv (a child of directory oddv) to the name nnm in
 * directory nddv.  On entry *ndvp carries the existing destination node,
 * if any; on success it returns the newly created node.
 *
 * Both directories' sdev_contents locks are taken as writer (only one
 * when oddv == nddv).  Any existing destination is removed first (from
 * the cache and, when persistent, from the backing store), then a fresh
 * node is built from the source's attributes.  For a directory rename,
 * the children are moved by recursively renaming each into the new
 * directory.  Returns 0 or an errno value.
 */
int
sdev_rnmnode(struct sdev_node *oddv, struct sdev_node *odv,
    struct sdev_node *nddv, struct sdev_node **ndvp, char *nnm,
    struct cred *cred)
{
	int error = 0;
	struct vnode *ovp = SDEVTOV(odv);
	struct vnode *nvp;
	struct vattr vattr;
	int doingdir = (ovp->v_type == VDIR);
	char *link = NULL;
	int samedir = (oddv == nddv) ? 1 : 0;
	int bkstore = 0;	/* destination also exists in backing store */
	struct sdev_node *idv = NULL;
	struct sdev_node *ndv = NULL;
	timestruc_t now;

	/* snapshot the source attributes used to build the new node */
	vattr.va_mask = AT_MODE|AT_UID|AT_GID;
	error = VOP_GETATTR(ovp, &vattr, 0, cred, NULL);
	if (error)
		return (error);

	if (!samedir)
		rw_enter(&oddv->sdev_contents, RW_WRITER);
	rw_enter(&nddv->sdev_contents, RW_WRITER);

	/*
	 * the source may have been deleted by another thread before
	 * we get here.
	 */
	if (odv->sdev_state != SDEV_READY) {
		error = ENOENT;
		goto err_out;
	}

	/* cannot rename a directory on top of itself */
	if (doingdir && (odv == nddv)) {
		error = EINVAL;
		goto err_out;
	}

	/*
	 * If renaming a directory, and the parents are different (".." must be
	 * changed) then the source dir must not be in the dir hierarchy above
	 * the target since it would orphan everything below the source dir.
	 */
	if (doingdir && (oddv != nddv)) {
		error = sdev_checkpath(odv, nddv, cred);
		if (error)
			goto err_out;
	}

	/* destination existing */
	if (*ndvp) {
		nvp = SDEVTOV(*ndvp);
		ASSERT(nvp);

		/* handling renaming to itself */
		if (odv == *ndvp) {
			error = 0;
			goto err_out;
		}

		if (nvp->v_type == VDIR) {
			if (!doingdir) {
				error = EISDIR;
				goto err_out;
			}

			/* a mounted-over destination dir cannot be removed */
			if (vn_vfswlock(nvp)) {
				error = EBUSY;
				goto err_out;
			}

			if (vn_mountedvfs(nvp) != NULL) {
				vn_vfsunlock(nvp);
				error = EBUSY;
				goto err_out;
			}

			/* in case dir1 exists in dir2 and "mv dir1 dir2" */
			if ((*ndvp)->sdev_nlink > 2) {
				vn_vfsunlock(nvp);
				error = EEXIST;
				goto err_out;
			}
			vn_vfsunlock(nvp);

			/* drop the destination from cache and backing store */
			(void) sdev_dirdelete(nddv, *ndvp);
			*ndvp = NULL;
			ASSERT(nddv->sdev_attrvp);
			error = VOP_RMDIR(nddv->sdev_attrvp, nnm,
			    nddv->sdev_attrvp, cred, NULL, 0);
			if (error)
				goto err_out;
		} else {
			if (doingdir) {
				error = ENOTDIR;
				goto err_out;
			}

			if (SDEV_IS_PERSIST((*ndvp))) {
				bkstore = 1;
			}

			/*
			 * get rid of the node from the directory cache
			 * note, in case EBUSY is returned, the ZOMBIE
			 * node is taken care in sdev_mknode.
			 */
			(void) sdev_dirdelete(nddv, *ndvp);
			*ndvp = NULL;
			if (bkstore) {
				ASSERT(nddv->sdev_attrvp);
				error = VOP_REMOVE(nddv->sdev_attrvp,
				    nnm, cred, NULL, 0);
				if (error)
					goto err_out;
			}
		}
	}

	/* fix the source for a symlink */
	if (vattr.va_type == VLNK) {
		/* resolve the link text so the new node can carry it */
		if (odv->sdev_symlink == NULL) {
			error = sdev_follow_link(odv);
			if (error) {
				error = ENOENT;
				goto err_out;
			}
		}
		ASSERT(odv->sdev_symlink);
		link = i_ddi_strdup(odv->sdev_symlink, KM_SLEEP);
	}

	/*
	 * make a fresh node from the source attrs
	 */
	ASSERT(RW_WRITE_HELD(&nddv->sdev_contents));
	error = sdev_mknode(nddv, nnm, ndvp, &vattr,
	    NULL, (void *)link, cred, SDEV_READY);

	if (link)
		kmem_free(link, strlen(link) + 1);

	if (error)
		goto err_out;
	ASSERT(*ndvp);
	ASSERT((*ndvp)->sdev_state == SDEV_READY);

	/* move dir contents */
	if (doingdir) {
		/* recursively rename each child into the new directory */
		for (idv = SDEV_FIRST_ENTRY(odv); idv;
		    idv = SDEV_NEXT_ENTRY(odv, idv)) {
			error = sdev_rnmnode(odv, idv,
			    (struct sdev_node *)(*ndvp), &ndv,
			    idv->sdev_name, cred);
			if (error)
				goto err_out;
			ndv = NULL;
		}
	}

	/* the new node was changed/accessed */
	if ((*ndvp)->sdev_attrvp) {
		sdev_update_timestamps((*ndvp)->sdev_attrvp, kcred,
		    AT_CTIME|AT_ATIME);
	} else {
		ASSERT((*ndvp)->sdev_attr);
		gethrestime(&now);
		(*ndvp)->sdev_attr->va_ctime = now;
		(*ndvp)->sdev_attr->va_atime = now;
	}

	/* the target directory's contents changed */
	if (nddv->sdev_attrvp) {
		sdev_update_timestamps(nddv->sdev_attrvp, kcred,
		    AT_MTIME|AT_ATIME);
	} else {
		ASSERT(nddv->sdev_attr);
		gethrestime(&now);
		nddv->sdev_attr->va_mtime = now;
		nddv->sdev_attr->va_atime = now;
	}
	rw_exit(&nddv->sdev_contents);
	if (!samedir)
		rw_exit(&oddv->sdev_contents);

	/* release this function's hold on the newly created node */
	SDEV_RELE(*ndvp);
	return (error);

err_out:
	/* unified unlock path for all failure (and no-op) exits above */
	rw_exit(&nddv->sdev_contents);
	if (!samedir)
		rw_exit(&oddv->sdev_contents);
	return (error);
}
1347 
1348 /*
1349  * Merge sdev_node specific information into an attribute structure.
1350  *
1351  * note: sdev_node is not locked here
1352  */
1353 void
1354 sdev_vattr_merge(struct sdev_node *dv, struct vattr *vap)
1355 {
1356 	struct vnode *vp = SDEVTOV(dv);
1357 
1358 	vap->va_nlink = dv->sdev_nlink;
1359 	vap->va_nodeid = dv->sdev_ino;
1360 	vap->va_fsid = SDEVTOV(dv->sdev_dotdot)->v_rdev;
1361 	vap->va_type = vp->v_type;
1362 
1363 	if (vp->v_type == VDIR) {
1364 		vap->va_rdev = 0;
1365 		vap->va_fsid = vp->v_rdev;
1366 	} else if (vp->v_type == VLNK) {
1367 		vap->va_rdev = 0;
1368 		vap->va_mode  &= ~S_IFMT;
1369 		vap->va_mode |= S_IFLNK;
1370 	} else if ((vp->v_type == VCHR) || (vp->v_type == VBLK)) {
1371 		vap->va_rdev = vp->v_rdev;
1372 		vap->va_mode &= ~S_IFMT;
1373 		if (vap->va_type == VCHR)
1374 			vap->va_mode |= S_IFCHR;
1375 		else
1376 			vap->va_mode |= S_IFBLK;
1377 	} else {
1378 		vap->va_rdev = 0;
1379 	}
1380 }
1381 
1382 static struct vattr *
1383 sdev_getdefault_attr(enum vtype type)
1384 {
1385 	if (type == VDIR)
1386 		return (&sdev_vattr_dir);
1387 	else if (type == VCHR)
1388 		return (&sdev_vattr_chr);
1389 	else if (type == VBLK)
1390 		return (&sdev_vattr_blk);
1391 	else if (type == VLNK)
1392 		return (&sdev_vattr_lnk);
1393 	else
1394 		return (NULL);
1395 }
/*
 * Translate an sdev_node into the vnode handed back to a caller.
 * Character and block devices are wrapped in a spec vnode; every other
 * type is returned as-is.  On the VCHR/VBLK path the hold on the input
 * vnode is transferred: vp is released and the spec vnode takes its
 * place in *vpp.  Returns 0 on success, or ENOSYS if no spec vnode
 * could be produced.
 */
int
sdev_to_vp(struct sdev_node *dv, struct vnode **vpp)
{
	int rv = 0;
	struct vnode *vp = SDEVTOV(dv);

	switch (vp->v_type) {
	case VCHR:
	case VBLK:
		/*
		 * If vnode is a device, return special vnode instead
		 * (though it knows all about -us- via sp->s_realvp)
		 */
		*vpp = specvp(vp, vp->v_rdev, vp->v_type, kcred);
		VN_RELE(vp);
		if (*vpp == NULLVP)
			rv = ENOSYS;
		break;
	default:	/* most types are returned as is */
		*vpp = vp;
		break;
	}
	return (rv);
}
1420 
1421 /*
1422  * loopback into sdev_lookup()
1423  */
1424 static struct vnode *
1425 devname_find_by_devpath(char *devpath, struct vattr *vattr)
1426 {
1427 	int error = 0;
1428 	struct vnode *vp;
1429 
1430 	error = lookupname(devpath, UIO_SYSSPACE, NO_FOLLOW, NULLVPP, &vp);
1431 	if (error) {
1432 		return (NULL);
1433 	}
1434 
1435 	if (vattr)
1436 		(void) VOP_GETATTR(vp, vattr, 0, kcred, NULL);
1437 	return (vp);
1438 }
1439 
1440 /*
1441  * the junction between devname and devfs
1442  */
1443 static struct vnode *
1444 devname_configure_by_path(char *physpath, struct vattr *vattr)
1445 {
1446 	int error = 0;
1447 	struct vnode *vp;
1448 
1449 	ASSERT(strncmp(physpath, "/devices/", sizeof ("/devices/") - 1)
1450 	    == 0);
1451 
1452 	error = devfs_lookupname(physpath + sizeof ("/devices/") - 1,
1453 	    NULLVPP, &vp);
1454 	if (error != 0) {
1455 		if (error == ENODEV) {
1456 			cmn_err(CE_CONT, "%s: not found (line %d)\n",
1457 			    physpath, __LINE__);
1458 		}
1459 
1460 		return (NULL);
1461 	}
1462 
1463 	if (vattr)
1464 		(void) VOP_GETATTR(vp, vattr, 0, kcred, NULL);
1465 	return (vp);
1466 }
1467 
1468 /*
1469  * junction between devname and root file system, e.g. ufs
1470  */
1471 int
1472 devname_backstore_lookup(struct sdev_node *ddv, char *nm, struct vnode **rvp)
1473 {
1474 	struct vnode *rdvp = ddv->sdev_attrvp;
1475 	int rval = 0;
1476 
1477 	ASSERT(rdvp);
1478 
1479 	rval = VOP_LOOKUP(rdvp, nm, rvp, NULL, 0, NULL, kcred, NULL, NULL,
1480 	    NULL);
1481 	return (rval);
1482 }
1483 
1484 static int
1485 sdev_filldir_from_store(struct sdev_node *ddv, int dlen, struct cred *cred)
1486 {
1487 	struct sdev_node *dv = NULL;
1488 	char	*nm;
1489 	struct vnode *dirvp;
1490 	int	error;
1491 	vnode_t	*vp;
1492 	int eof;
1493 	struct iovec iov;
1494 	struct uio uio;
1495 	struct dirent64 *dp;
1496 	dirent64_t *dbuf;
1497 	size_t dbuflen;
1498 	struct vattr vattr;
1499 	char *link = NULL;
1500 
1501 	if (ddv->sdev_attrvp == NULL)
1502 		return (0);
1503 	if (!(ddv->sdev_flags & SDEV_BUILD))
1504 		return (0);
1505 
1506 	dirvp = ddv->sdev_attrvp;
1507 	VN_HOLD(dirvp);
1508 	dbuf = kmem_zalloc(dlen, KM_SLEEP);
1509 
1510 	uio.uio_iov = &iov;
1511 	uio.uio_iovcnt = 1;
1512 	uio.uio_segflg = UIO_SYSSPACE;
1513 	uio.uio_fmode = 0;
1514 	uio.uio_extflg = UIO_COPY_CACHED;
1515 	uio.uio_loffset = 0;
1516 	uio.uio_llimit = MAXOFFSET_T;
1517 
1518 	eof = 0;
1519 	error = 0;
1520 	while (!error && !eof) {
1521 		uio.uio_resid = dlen;
1522 		iov.iov_base = (char *)dbuf;
1523 		iov.iov_len = dlen;
1524 		(void) VOP_RWLOCK(dirvp, V_WRITELOCK_FALSE, NULL);
1525 		error = VOP_READDIR(dirvp, &uio, kcred, &eof, NULL, 0);
1526 		VOP_RWUNLOCK(dirvp, V_WRITELOCK_FALSE, NULL);
1527 
1528 		dbuflen = dlen - uio.uio_resid;
1529 		if (error || dbuflen == 0)
1530 			break;
1531 
1532 		if (!(ddv->sdev_flags & SDEV_BUILD)) {
1533 			error = 0;
1534 			break;
1535 		}
1536 
1537 		for (dp = dbuf; ((intptr_t)dp <
1538 		    (intptr_t)dbuf + dbuflen);
1539 		    dp = (dirent64_t *)((intptr_t)dp + dp->d_reclen)) {
1540 			nm = dp->d_name;
1541 
1542 			if (strcmp(nm, ".") == 0 ||
1543 			    strcmp(nm, "..") == 0)
1544 				continue;
1545 
1546 			vp = NULLVP;
1547 			dv = sdev_cache_lookup(ddv, nm);
1548 			if (dv) {
1549 				if (dv->sdev_state != SDEV_ZOMBIE) {
1550 					SDEV_SIMPLE_RELE(dv);
1551 				} else {
1552 					/*
1553 					 * A ZOMBIE node may not have been
1554 					 * cleaned up from the backing store,
1555 					 * bypass this entry in this case,
1556 					 * and clean it up from the directory
1557 					 * cache if this is the last call.
1558 					 */
1559 					(void) sdev_dirdelete(ddv, dv);
1560 				}
1561 				continue;
1562 			}
1563 
1564 			/* refill the cache if not already */
1565 			error = devname_backstore_lookup(ddv, nm, &vp);
1566 			if (error)
1567 				continue;
1568 
1569 			vattr.va_mask = AT_MODE|AT_UID|AT_GID;
1570 			error = VOP_GETATTR(vp, &vattr, 0, cred, NULL);
1571 			if (error)
1572 				continue;
1573 
1574 			if (vattr.va_type == VLNK) {
1575 				error = sdev_getlink(vp, &link);
1576 				if (error) {
1577 					continue;
1578 				}
1579 				ASSERT(link != NULL);
1580 			}
1581 
1582 			if (!rw_tryupgrade(&ddv->sdev_contents)) {
1583 				rw_exit(&ddv->sdev_contents);
1584 				rw_enter(&ddv->sdev_contents, RW_WRITER);
1585 			}
1586 			error = sdev_mknode(ddv, nm, &dv, &vattr, vp, link,
1587 			    cred, SDEV_READY);
1588 			rw_downgrade(&ddv->sdev_contents);
1589 
1590 			if (link != NULL) {
1591 				kmem_free(link, strlen(link) + 1);
1592 				link = NULL;
1593 			}
1594 
1595 			if (!error) {
1596 				ASSERT(dv);
1597 				ASSERT(dv->sdev_state != SDEV_ZOMBIE);
1598 				SDEV_SIMPLE_RELE(dv);
1599 			}
1600 			vp = NULL;
1601 			dv = NULL;
1602 		}
1603 	}
1604 
1605 done:
1606 	VN_RELE(dirvp);
1607 	kmem_free(dbuf, dlen);
1608 
1609 	return (error);
1610 }
1611 
1612 void
1613 sdev_filldir_dynamic(struct sdev_node *ddv)
1614 {
1615 	int error;
1616 	int i;
1617 	struct vattr *vap;
1618 	char *nm = NULL;
1619 	struct sdev_node *dv = NULL;
1620 
1621 	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));
1622 	ASSERT((ddv->sdev_flags & SDEV_BUILD));
1623 
1624 	vap = sdev_getdefault_attr(VDIR);
1625 	for (i = 0; vtab[i].vt_name != NULL; i++) {
1626 		nm = vtab[i].vt_name;
1627 		ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));
1628 		dv = NULL;
1629 		error = sdev_mknode(ddv, nm, &dv, vap, NULL,
1630 		    NULL, kcred, SDEV_READY);
1631 		if (error) {
1632 			cmn_err(CE_WARN, "%s/%s: error %d\n",
1633 			    ddv->sdev_name, nm, error);
1634 		} else {
1635 			ASSERT(dv);
1636 			ASSERT(dv->sdev_state != SDEV_ZOMBIE);
1637 			SDEV_SIMPLE_RELE(dv);
1638 		}
1639 	}
1640 }
1641 
1642 /*
1643  * Creating a backing store entry based on sdev_attr.
1644  * This is called either as part of node creation in a persistent directory
1645  * or from setattr/setsecattr to persist access attributes across reboot.
1646  */
/*
 * Find or create the backing-store object for dv and attach it as
 * dv->sdev_attrvp, handing the attributes cached in dv->sdev_attr over
 * to the backing file system.
 *
 * Caller holds dv->sdev_contents as writer and dv has no attrvp yet.
 * Returns 0 on success, or the errno from the backing-store operation.
 */
int
sdev_shadow_node(struct sdev_node *dv, struct cred *cred)
{
	int error = 0;
	struct vnode *dvp = SDEVTOV(dv->sdev_dotdot);
	struct vnode *rdvp = VTOSDEV(dvp)->sdev_attrvp;
	struct vattr *vap = dv->sdev_attr;
	char *nm = dv->sdev_name;
	struct vnode *tmpvp, **rvp = &tmpvp, *rrvp = NULL;

	ASSERT(dv && dv->sdev_name && rdvp);
	ASSERT(RW_WRITE_HELD(&dv->sdev_contents) && dv->sdev_attrvp == NULL);

lookup:
	/* try to find it in the backing store */
	error = VOP_LOOKUP(rdvp, nm, rvp, NULL, 0, NULL, cred, NULL, NULL,
	    NULL);
	if (error == 0) {
		/* prefer the underlying real vnode over a stacked one */
		if (VOP_REALVP(*rvp, &rrvp, NULL) == 0) {
			VN_HOLD(rrvp);
			VN_RELE(*rvp);
			*rvp = rrvp;
		}

		/* attributes now live in the backing store, not in-core */
		kmem_free(dv->sdev_attr, sizeof (vattr_t));
		dv->sdev_attr = NULL;
		dv->sdev_attrvp = *rvp;
		return (0);
	}

	/* let's try to persist the node */
	gethrestime(&vap->va_atime);
	vap->va_mtime = vap->va_atime;
	vap->va_ctime = vap->va_atime;
	vap->va_mask |= AT_TYPE|AT_MODE;
	switch (vap->va_type) {
	case VDIR:
		error = VOP_MKDIR(rdvp, nm, vap, rvp, cred, NULL, 0, NULL);
		sdcmn_err9(("sdev_shadow_node: mkdir vp %p error %d\n",
		    (void *)(*rvp), error));
		break;
	case VCHR:
	case VBLK:
	case VREG:
	case VDOOR:
		error = VOP_CREATE(rdvp, nm, vap, NONEXCL, VREAD|VWRITE,
		    rvp, cred, 0, NULL, NULL);
		sdcmn_err9(("sdev_shadow_node: create vp %p, error %d\n",
		    (void *)(*rvp), error));
		/* drop create's hold; the retry lookup re-acquires one */
		if (!error)
			VN_RELE(*rvp);
		break;
	case VLNK:
		ASSERT(dv->sdev_symlink);
		error = VOP_SYMLINK(rdvp, nm, vap, dv->sdev_symlink, cred,
		    NULL, 0);
		sdcmn_err9(("sdev_shadow_node: create symlink error %d\n",
		    error));
		break;
	default:
		/* no other vnode types are ever shadowed */
		cmn_err(CE_PANIC, "dev: %s: sdev_shadow_node "
		    "create\n", nm);
		/*NOTREACHED*/
	}

	/* go back to lookup to factor out spec node and set attrvp */
	if (error == 0)
		goto lookup;

	sdcmn_err(("cannot persist %s - error %d\n", dv->sdev_path, error));
	return (error);
}
1719 
/*
 * Add node *dv (named nm) to directory ddv's in-core cache, resolving
 * a collision with any node already cached under the same name.
 *
 * If a live duplicate already exists, the caller's node is destroyed
 * and *dv is redirected to the cached one.  If a ZOMBIE duplicate
 * cannot be removed (sdev_dirdelete returns EBUSY), the new node is
 * destroyed, *dv is set to NULL and EBUSY is returned; otherwise
 * returns 0.
 */
static int
sdev_cache_add(struct sdev_node *ddv, struct sdev_node **dv, char *nm)
{
	int error = 0;
	struct sdev_node *dup = NULL;

	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));
	if ((dup = sdev_findbyname(ddv, nm)) == NULL) {
		/* no name collision: enter the new node directly */
		sdev_direnter(ddv, *dv);
	} else {
		if (dup->sdev_state == SDEV_ZOMBIE) {
			error = sdev_dirdelete(ddv, dup);
			/*
			 * The ZOMBIE node is still hanging
			 * around with more than one reference counts.
			 * Fail the new node creation so that
			 * the directory cache won't have
			 * duplicate entries for the same named node
			 */
			if (error == EBUSY) {
				SDEV_SIMPLE_RELE(*dv);
				sdev_nodedestroy(*dv, 0);
				*dv = NULL;
				return (error);
			}
			/* zombie removed; enter the new node in its place */
			sdev_direnter(ddv, *dv);
		} else {
			/* a live node won the race; adopt it instead */
			ASSERT((*dv)->sdev_state != SDEV_ZOMBIE);
			SDEV_SIMPLE_RELE(*dv);
			sdev_nodedestroy(*dv, 0);
			*dv = dup;
		}
	}

	return (0);
}
1756 
1757 static int
1758 sdev_cache_delete(struct sdev_node *ddv, struct sdev_node **dv)
1759 {
1760 	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));
1761 	return (sdev_dirdelete(ddv, *dv));
1762 }
1763 
1764 /*
1765  * update the in-core directory cache
1766  */
1767 int
1768 sdev_cache_update(struct sdev_node *ddv, struct sdev_node **dv, char *nm,
1769     sdev_cache_ops_t ops)
1770 {
1771 	int error = 0;
1772 
1773 	ASSERT((SDEV_HELD(*dv)));
1774 
1775 	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));
1776 	switch (ops) {
1777 	case SDEV_CACHE_ADD:
1778 		error = sdev_cache_add(ddv, dv, nm);
1779 		break;
1780 	case SDEV_CACHE_DELETE:
1781 		error = sdev_cache_delete(ddv, dv);
1782 		break;
1783 	default:
1784 		break;
1785 	}
1786 
1787 	return (error);
1788 }
1789 
1790 /*
1791  * retrieve the named entry from the directory cache
1792  */
1793 struct sdev_node *
1794 sdev_cache_lookup(struct sdev_node *ddv, char *nm)
1795 {
1796 	struct sdev_node *dv = NULL;
1797 
1798 	ASSERT(RW_LOCK_HELD(&ddv->sdev_contents));
1799 	dv = sdev_findbyname(ddv, nm);
1800 
1801 	return (dv);
1802 }
1803 
1804 /*
1805  * Implicit reconfig for nodes constructed by a link generator
1806  * Start devfsadm if needed, or if devfsadm is in progress,
1807  * prepare to block on devfsadm either completing or
1808  * constructing the desired node.  As devfsadmd is global
1809  * in scope, constructing all necessary nodes, we only
1810  * need to initiate it once.
1811  */
/*
 * Rendezvous with (or kick off) devfsadmd for node nm in directory ddv.
 * If a devfsadm run is active, or one is started here, dv is flagged so
 * that other lookups block until the run completes, and 0 is returned.
 * If devfsadm has already run, -1 is returned: there is nothing further
 * to try.
 */
static int
sdev_call_devfsadmd(struct sdev_node *ddv, struct sdev_node *dv, char *nm)
{
	int error = 0;

	if (DEVNAME_DEVFSADM_IS_RUNNING(devfsadm_state)) {
		/* a run is already in flight; just wait for it */
		sdcmn_err6(("lookup: waiting for %s/%s, 0x%x\n",
		    ddv->sdev_name, nm, devfsadm_state));
		mutex_enter(&dv->sdev_lookup_lock);
		SDEV_BLOCK_OTHERS(dv, (SDEV_LOOKUP | SDEV_LGWAITING));
		mutex_exit(&dv->sdev_lookup_lock);
		error = 0;
	} else if (!DEVNAME_DEVFSADM_HAS_RUN(devfsadm_state)) {
		/* first lookup to need devfsadm: start the daemon thread */
		sdcmn_err6(("lookup %s/%s starting devfsadm, 0x%x\n",
		    ddv->sdev_name, nm, devfsadm_state));

		sdev_devfsadmd_thread(ddv, dv, kcred);
		mutex_enter(&dv->sdev_lookup_lock);
		SDEV_BLOCK_OTHERS(dv,
		    (SDEV_LOOKUP | SDEV_LGWAITING));
		mutex_exit(&dv->sdev_lookup_lock);
		error = 0;
	} else {
		/* devfsadm already ran and did not produce this node */
		error = -1;
	}

	return (error);
}
1840 
/*
 * Resolve name nm in directory ddv through a loadable module's
 * directory-operations lookup function fn.  The module fills the node's
 * devname handle with a namespace type (dh_spec) and a path string
 * (dh_args); a symlink node pointing at that path is then entered into
 * ddv's cache.  On success *dvp is updated to the resulting node and 0
 * is returned; the module-supplied path string is freed on every path.
 *
 * Caller holds ddv->sdev_contents as reader; the lock is temporarily
 * upgraded to writer around node creation.
 */
static int
sdev_call_modulelookup(struct sdev_node *ddv, struct sdev_node **dvp, char *nm,
    int (*fn)(char *, devname_handle_t *, struct cred *), struct cred *cred)
{
	struct vnode *rvp = NULL;
	int error = 0;
	struct vattr *vap;
	devname_spec_t spec;
	devname_handle_t *hdl;
	void *args = NULL;
	struct sdev_node *dv = *dvp;

	ASSERT(dv && ddv);
	hdl = &(dv->sdev_handle);
	ASSERT(hdl->dh_data == dv);
	/* block concurrent lookups of this node while it is constructed */
	mutex_enter(&dv->sdev_lookup_lock);
	SDEV_BLOCK_OTHERS(dv, SDEV_LOOKUP);
	mutex_exit(&dv->sdev_lookup_lock);
	error = (*fn)(nm, hdl, cred);
	if (error) {
		return (error);
	}

	/* the module communicates its result through the handle */
	spec = hdl->dh_spec;
	args = hdl->dh_args;
	ASSERT(args);

	/* validate the referenced path before creating the symlink node */
	switch (spec) {
	case DEVNAME_NS_PATH:
		/*
		 * symlink of:
		 *	/dev/dir/nm -> /device/...
		 */
		rvp = devname_configure_by_path((char *)args, NULL);
		break;
	case DEVNAME_NS_DEV:
		/*
		 * symlink of:
		 *	/dev/dir/nm -> /dev/...
		 */
		rvp = devname_find_by_devpath((char *)args, NULL);
		break;
	default:
		if (args)
			kmem_free((char *)args, strlen(args) + 1);
		return (ENOENT);

	}

	/*
	 * NOTE(review): rvp is only used as an existence check below;
	 * the hold returned by the lookup helpers above does not appear
	 * to be released in this function -- verify against the helpers.
	 */
	if (rvp == NULL) {
		if (args)
			kmem_free((char *)args, strlen(args) + 1);
		return (ENOENT);
	} else {
		vap = sdev_getdefault_attr(VLNK);
		ASSERT(RW_READ_HELD(&ddv->sdev_contents));
		/*
		 * Could sdev_mknode return a different dv_node
		 * once the lock is dropped?
		 */
		if (!rw_tryupgrade(&ddv->sdev_contents)) {
			rw_exit(&ddv->sdev_contents);
			rw_enter(&ddv->sdev_contents, RW_WRITER);
		}
		error = sdev_mknode(ddv, nm, &dv, vap, NULL, args, cred,
		    SDEV_READY);
		rw_downgrade(&ddv->sdev_contents);
		if (error) {
			if (args)
				kmem_free((char *)args, strlen(args) + 1);
			return (error);
		} else {
			/* node is ready: unblock waiting lookups */
			mutex_enter(&dv->sdev_lookup_lock);
			SDEV_UNBLOCK_OTHERS(dv, SDEV_LOOKUP);
			mutex_exit(&dv->sdev_lookup_lock);
			error = 0;
		}
	}

	if (args)
		kmem_free((char *)args, strlen(args) + 1);

	*dvp = dv;
	return (0);
}
1926 
1927 /*
1928  *  Support for specialized device naming construction mechanisms
1929  */
/*
 * Construct the node nm in directory ddv via a vtab-registered
 * callback.  The flags select the callback's result protocol:
 *
 *   SDEV_PATH   callback returns a /devices physical path; a symlink
 *               node is created after the path is validated in devfs.
 *   SDEV_VLINK  callback returns a link target path; a symlink node
 *               is created without validation.
 *   SDEV_VNODE  callback returns a backing-store vnode directly.
 *   SDEV_VATTR  callback fills in a vattr; a node is created from it.
 *
 * Caller holds ddv->sdev_contents as reader; the lock is temporarily
 * upgraded to writer around each sdev_mknode call.  On success *dvp is
 * updated to the resulting node and 0 is returned; failures return the
 * callback/mknode error or -1.
 */
static int
sdev_call_dircallback(struct sdev_node *ddv, struct sdev_node **dvp, char *nm,
    int (*callback)(struct sdev_node *, char *, void **, struct cred *,
    void *, char *), int flags, struct cred *cred)
{
	int rv = 0;
	char *physpath = NULL;
	struct vnode *rvp = NULL;
	struct vattr vattr;
	struct vattr *vap;
	struct sdev_node *dv = *dvp;

	/* block concurrent lookups of this node while it is constructed */
	mutex_enter(&dv->sdev_lookup_lock);
	SDEV_BLOCK_OTHERS(dv, SDEV_LOOKUP);
	mutex_exit(&dv->sdev_lookup_lock);

	/* for non-devfsadm devices */
	if (flags & SDEV_PATH) {
		/* callback hands back a /devices physical path */
		physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
		rv = callback(ddv, nm, (void *)&physpath, kcred, NULL,
		    NULL);
		if (rv) {
			kmem_free(physpath, MAXPATHLEN);
			return (-1);
		}

		/* validate the path in devfs before creating the link */
		rvp = devname_configure_by_path(physpath, NULL);
		if (rvp == NULL) {
			sdcmn_err3(("devname_configure_by_path: "
			    "failed for /dev/%s/%s\n",
			    ddv->sdev_name, nm));
			kmem_free(physpath, MAXPATHLEN);
			rv = -1;
		} else {
			vap = sdev_getdefault_attr(VLNK);
			ASSERT(RW_READ_HELD(&ddv->sdev_contents));

			/*
			 * Sdev_mknode may return back a different sdev_node
			 * that was created by another thread that
			 * raced to the directory cache before this thread.
			 *
			 * With current directory cache mechanism
			 * (linked list with the sdev_node name as
			 * the entity key), this is a way to make sure
			 * only one entry exists for the same name
			 * in the same directory. The outcome is
			 * the winner wins.
			 */
			if (!rw_tryupgrade(&ddv->sdev_contents)) {
				rw_exit(&ddv->sdev_contents);
				rw_enter(&ddv->sdev_contents, RW_WRITER);
			}
			rv = sdev_mknode(ddv, nm, &dv, vap, NULL,
			    (void *)physpath, cred, SDEV_READY);
			rw_downgrade(&ddv->sdev_contents);
			kmem_free(physpath, MAXPATHLEN);
			if (rv) {
				return (rv);
			} else {
				/* node is ready: unblock waiting lookups */
				mutex_enter(&dv->sdev_lookup_lock);
				SDEV_UNBLOCK_OTHERS(dv, SDEV_LOOKUP);
				mutex_exit(&dv->sdev_lookup_lock);
				return (0);
			}
		}
	} else if (flags & SDEV_VLINK) {
		/* callback hands back a symlink target path, unvalidated */
		physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
		rv = callback(ddv, nm, (void *)&physpath, kcred, NULL,
		    NULL);
		if (rv) {
			kmem_free(physpath, MAXPATHLEN);
			return (-1);
		}

		vap = sdev_getdefault_attr(VLNK);
		vap->va_size = strlen(physpath);
		ASSERT(RW_READ_HELD(&ddv->sdev_contents));

		if (!rw_tryupgrade(&ddv->sdev_contents)) {
			rw_exit(&ddv->sdev_contents);
			rw_enter(&ddv->sdev_contents, RW_WRITER);
		}
		rv = sdev_mknode(ddv, nm, &dv, vap, NULL,
		    (void *)physpath, cred, SDEV_READY);
		rw_downgrade(&ddv->sdev_contents);
		kmem_free(physpath, MAXPATHLEN);
		if (rv)
			return (rv);

		/* node is ready: unblock waiting lookups */
		mutex_enter(&dv->sdev_lookup_lock);
		SDEV_UNBLOCK_OTHERS(dv, SDEV_LOOKUP);
		mutex_exit(&dv->sdev_lookup_lock);
		return (0);
	} else if (flags & SDEV_VNODE) {
		/*
		 * DBNR has its own way to create the device
		 * and return a backing store vnode in rvp
		 */
		ASSERT(callback);
		rv = callback(ddv, nm, (void *)&rvp, kcred, NULL, NULL);
		if (rv || (rvp == NULL)) {
			sdcmn_err3(("devname_lookup_func: SDEV_VNODE "
			    "callback failed \n"));
			return (-1);
		}
		vap = sdev_getdefault_attr(rvp->v_type);
		if (vap == NULL)
			return (-1);

		ASSERT(RW_READ_HELD(&ddv->sdev_contents));
		if (!rw_tryupgrade(&ddv->sdev_contents)) {
			rw_exit(&ddv->sdev_contents);
			rw_enter(&ddv->sdev_contents, RW_WRITER);
		}
		rv = sdev_mknode(ddv, nm, &dv, vap, rvp, NULL,
		    cred, SDEV_READY);
		rw_downgrade(&ddv->sdev_contents);
		if (rv)
			return (rv);

		/* node is ready: unblock waiting lookups */
		mutex_enter(&dv->sdev_lookup_lock);
		SDEV_UNBLOCK_OTHERS(dv, SDEV_LOOKUP);
		mutex_exit(&dv->sdev_lookup_lock);
		return (0);
	} else if (flags & SDEV_VATTR) {
		/*
		 * /dev/pts
		 *
		 * DBNR has its own way to create the device
		 * "0" is returned upon success.
		 *
		 * callback is responsible to set the basic attributes,
		 * e.g. va_type/va_uid/va_gid/
		 *    dev_t if VCHR or VBLK/
		 */
		ASSERT(callback);
		rv = callback(ddv, nm, (void *)&vattr, kcred, NULL, NULL);
		if (rv) {
			sdcmn_err3(("devname_lookup_func: SDEV_NONE "
			    "callback failed \n"));
			return (-1);
		}

		ASSERT(RW_READ_HELD(&ddv->sdev_contents));
		if (!rw_tryupgrade(&ddv->sdev_contents)) {
			rw_exit(&ddv->sdev_contents);
			rw_enter(&ddv->sdev_contents, RW_WRITER);
		}
		rv = sdev_mknode(ddv, nm, &dv, &vattr, NULL, NULL,
		    cred, SDEV_READY);
		rw_downgrade(&ddv->sdev_contents);

		if (rv)
			return (rv);

		/* node is ready: unblock waiting lookups */
		mutex_enter(&dv->sdev_lookup_lock);
		SDEV_UNBLOCK_OTHERS(dv, SDEV_LOOKUP);
		mutex_exit(&dv->sdev_lookup_lock);
		return (0);
	} else {
		/* no recognized construction protocol was requested */
		impossible(("lookup: %s/%s by %s not supported (%d)\n",
		    SDEVTOV(ddv)->v_path, nm, curproc->p_user.u_comm,
		    __LINE__));
		rv = -1;
	}

	*dvp = dv;
	return (rv);
}
2100 
static int
is_devfsadm_thread(char *exec_name)
{
	/*
	 * note: because devfsadmd -> /usr/sbin/devfsadm
	 * it is safe to use "devfsadm" to capture the lookups
	 * from devfsadm and its daemon version.
	 */
	return (strcmp(exec_name, "devfsadm") == 0 ? 1 : 0);
}
2113 
2114 
2115 /*
2116  * Lookup Order:
2117  *	sdev_node cache;
2118  *	backing store (SDEV_PERSIST);
2119  *	DBNR: a. dir_ops implemented in the loadable modules;
2120  *	      b. vnode ops in vtab.
2121  */
2122 int
2123 devname_lookup_func(struct sdev_node *ddv, char *nm, struct vnode **vpp,
2124     struct cred *cred, int (*callback)(struct sdev_node *, char *, void **,
2125     struct cred *, void *, char *), int flags)
2126 {
2127 	int rv = 0, nmlen;
2128 	struct vnode *rvp = NULL;
2129 	struct sdev_node *dv = NULL;
2130 	int	retried = 0;
2131 	int	error = 0;
2132 	struct devname_nsmap *map = NULL;
2133 	struct devname_ops *dirops = NULL;
2134 	int (*fn)(char *, devname_handle_t *, struct cred *) = NULL;
2135 	struct vattr vattr;
2136 	char *lookup_thread = curproc->p_user.u_comm;
2137 	int failed_flags = 0;
2138 	int (*vtor)(struct sdev_node *) = NULL;
2139 	int state;
2140 	int parent_state;
2141 	char *link = NULL;
2142 
2143 	if (SDEVTOV(ddv)->v_type != VDIR)
2144 		return (ENOTDIR);
2145 
2146 	/*
2147 	 * Empty name or ., return node itself.
2148 	 */
2149 	nmlen = strlen(nm);
2150 	if ((nmlen == 0) || ((nmlen == 1) && (nm[0] == '.'))) {
2151 		*vpp = SDEVTOV(ddv);
2152 		VN_HOLD(*vpp);
2153 		return (0);
2154 	}
2155 
2156 	/*
2157 	 * .., return the parent directory
2158 	 */
2159 	if ((nmlen == 2) && (strcmp(nm, "..") == 0)) {
2160 		*vpp = SDEVTOV(ddv->sdev_dotdot);
2161 		VN_HOLD(*vpp);
2162 		return (0);
2163 	}
2164 
2165 	rw_enter(&ddv->sdev_contents, RW_READER);
2166 	if (ddv->sdev_flags & SDEV_VTOR) {
2167 		vtor = (int (*)(struct sdev_node *))sdev_get_vtor(ddv);
2168 		ASSERT(vtor);
2169 	}
2170 
2171 tryagain:
2172 	/*
2173 	 * (a) directory cache lookup:
2174 	 */
2175 	ASSERT(RW_READ_HELD(&ddv->sdev_contents));
2176 	parent_state = ddv->sdev_state;
2177 	dv = sdev_cache_lookup(ddv, nm);
2178 	if (dv) {
2179 		state = dv->sdev_state;
2180 		switch (state) {
2181 		case SDEV_INIT:
2182 			if (is_devfsadm_thread(lookup_thread))
2183 				break;
2184 
2185 			/* ZOMBIED parent won't allow node creation */
2186 			if (parent_state == SDEV_ZOMBIE) {
2187 				SD_TRACE_FAILED_LOOKUP(ddv, nm,
2188 				    retried);
2189 				goto nolock_notfound;
2190 			}
2191 
2192 			mutex_enter(&dv->sdev_lookup_lock);
2193 			/* compensate the threads started after devfsadm */
2194 			if (DEVNAME_DEVFSADM_IS_RUNNING(devfsadm_state) &&
2195 			    !(SDEV_IS_LOOKUP(dv)))
2196 				SDEV_BLOCK_OTHERS(dv,
2197 				    (SDEV_LOOKUP | SDEV_LGWAITING));
2198 
2199 			if (SDEV_IS_LOOKUP(dv)) {
2200 				failed_flags |= SLF_REBUILT;
2201 				rw_exit(&ddv->sdev_contents);
2202 				error = sdev_wait4lookup(dv, SDEV_LOOKUP);
2203 				mutex_exit(&dv->sdev_lookup_lock);
2204 				rw_enter(&ddv->sdev_contents, RW_READER);
2205 
2206 				if (error != 0) {
2207 					SD_TRACE_FAILED_LOOKUP(ddv, nm,
2208 					    retried);
2209 					goto nolock_notfound;
2210 				}
2211 
2212 				state = dv->sdev_state;
2213 				if (state == SDEV_INIT) {
2214 					SD_TRACE_FAILED_LOOKUP(ddv, nm,
2215 					    retried);
2216 					goto nolock_notfound;
2217 				} else if (state == SDEV_READY) {
2218 					goto found;
2219 				} else if (state == SDEV_ZOMBIE) {
2220 					rw_exit(&ddv->sdev_contents);
2221 					SD_TRACE_FAILED_LOOKUP(ddv, nm,
2222 					    retried);
2223 					SDEV_RELE(dv);
2224 					goto lookup_failed;
2225 				}
2226 			} else {
2227 				mutex_exit(&dv->sdev_lookup_lock);
2228 			}
2229 			break;
2230 		case SDEV_READY:
2231 			goto found;
2232 		case SDEV_ZOMBIE:
2233 			rw_exit(&ddv->sdev_contents);
2234 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2235 			SDEV_RELE(dv);
2236 			goto lookup_failed;
2237 		default:
2238 			rw_exit(&ddv->sdev_contents);
2239 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2240 			sdev_lookup_failed(ddv, nm, failed_flags);
2241 			*vpp = NULLVP;
2242 			return (ENOENT);
2243 		}
2244 	}
2245 	ASSERT(RW_READ_HELD(&ddv->sdev_contents));
2246 
2247 	/*
2248 	 * ZOMBIED parent does not allow new node creation.
2249 	 * bail out early
2250 	 */
2251 	if (parent_state == SDEV_ZOMBIE) {
2252 		rw_exit(&ddv->sdev_contents);
2253 		*vpp = NULL;
2254 		SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2255 		return (ENOENT);
2256 	}
2257 
2258 	/*
2259 	 * (b0): backing store lookup
2260 	 *	SDEV_PERSIST is default except:
2261 	 *		1) pts nodes
2262 	 *		2) non-chmod'ed local nodes
2263 	 */
2264 	if (SDEV_IS_PERSIST(ddv)) {
2265 		error = devname_backstore_lookup(ddv, nm, &rvp);
2266 
2267 		if (!error) {
2268 			sdcmn_err3(("devname_backstore_lookup: "
2269 			    "found attrvp %p for %s\n", (void *)rvp, nm));
2270 
2271 			vattr.va_mask = AT_MODE|AT_UID|AT_GID;
2272 			error = VOP_GETATTR(rvp, &vattr, 0, cred, NULL);
2273 			if (error) {
2274 				rw_exit(&ddv->sdev_contents);
2275 				if (dv)
2276 					SDEV_RELE(dv);
2277 				SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2278 				sdev_lookup_failed(ddv, nm, failed_flags);
2279 				*vpp = NULLVP;
2280 				return (ENOENT);
2281 			}
2282 
2283 			if (vattr.va_type == VLNK) {
2284 				error = sdev_getlink(rvp, &link);
2285 				if (error) {
2286 					rw_exit(&ddv->sdev_contents);
2287 					if (dv)
2288 						SDEV_RELE(dv);
2289 					SD_TRACE_FAILED_LOOKUP(ddv, nm,
2290 					    retried);
2291 					sdev_lookup_failed(ddv, nm,
2292 					    failed_flags);
2293 					*vpp = NULLVP;
2294 					return (ENOENT);
2295 				}
2296 				ASSERT(link != NULL);
2297 			}
2298 
2299 			if (!rw_tryupgrade(&ddv->sdev_contents)) {
2300 				rw_exit(&ddv->sdev_contents);
2301 				rw_enter(&ddv->sdev_contents, RW_WRITER);
2302 			}
2303 			error = sdev_mknode(ddv, nm, &dv, &vattr,
2304 			    rvp, link, cred, SDEV_READY);
2305 			rw_downgrade(&ddv->sdev_contents);
2306 
2307 			if (link != NULL) {
2308 				kmem_free(link, strlen(link) + 1);
2309 				link = NULL;
2310 			}
2311 
2312 			if (error) {
2313 				SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2314 				rw_exit(&ddv->sdev_contents);
2315 				if (dv)
2316 					SDEV_RELE(dv);
2317 				goto lookup_failed;
2318 			} else {
2319 				goto found;
2320 			}
2321 		} else if (retried) {
2322 			rw_exit(&ddv->sdev_contents);
2323 			sdcmn_err3(("retry of lookup of %s/%s: failed\n",
2324 			    ddv->sdev_name, nm));
2325 			if (dv)
2326 				SDEV_RELE(dv);
2327 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2328 			sdev_lookup_failed(ddv, nm, failed_flags);
2329 			*vpp = NULLVP;
2330 			return (ENOENT);
2331 		}
2332 	}
2333 
2334 lookup_create_node:
2335 	/* first thread that is doing the lookup on this node */
2336 	if (!dv) {
2337 		if (!rw_tryupgrade(&ddv->sdev_contents)) {
2338 			rw_exit(&ddv->sdev_contents);
2339 			rw_enter(&ddv->sdev_contents, RW_WRITER);
2340 		}
2341 		error = sdev_mknode(ddv, nm, &dv, NULL, NULL, NULL,
2342 		    cred, SDEV_INIT);
2343 		if (!dv) {
2344 			rw_exit(&ddv->sdev_contents);
2345 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2346 			sdev_lookup_failed(ddv, nm, failed_flags);
2347 			*vpp = NULLVP;
2348 			return (ENOENT);
2349 		}
2350 		rw_downgrade(&ddv->sdev_contents);
2351 	}
2352 	ASSERT(dv);
2353 	ASSERT(SDEV_HELD(dv));
2354 
2355 	if (SDEV_IS_NO_NCACHE(dv)) {
2356 		failed_flags |= SLF_NO_NCACHE;
2357 	}
2358 
2359 	if (SDEV_IS_GLOBAL(ddv)) {
2360 		map = sdev_get_map(ddv, 1);
2361 		dirops = map ? map->dir_ops : NULL;
2362 		fn = dirops ? dirops->devnops_lookup : NULL;
2363 	}
2364 
2365 	/*
2366 	 * (b1) invoking devfsadm once per life time for devfsadm nodes
2367 	 */
2368 	if ((fn == NULL) && !callback) {
2369 
2370 		if (sdev_reconfig_boot || !i_ddi_io_initialized() ||
2371 		    SDEV_IS_DYNAMIC(ddv) || SDEV_IS_NO_NCACHE(dv) ||
2372 		    ((moddebug & MODDEBUG_FINI_EBUSY) != 0)) {
2373 			ASSERT(SDEV_HELD(dv));
2374 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2375 			goto nolock_notfound;
2376 		}
2377 
2378 		/*
2379 		 * filter out known non-existent devices recorded
2380 		 * during initial reconfiguration boot for which
2381 		 * reconfig should not be done and lookup may
2382 		 * be short-circuited now.
2383 		 */
2384 		if (sdev_lookup_filter(ddv, nm)) {
2385 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2386 			goto nolock_notfound;
2387 		}
2388 
2389 		/* bypassing devfsadm internal nodes */
2390 		if (is_devfsadm_thread(lookup_thread)) {
2391 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2392 			goto nolock_notfound;
2393 		}
2394 
2395 		if (sdev_reconfig_disable) {
2396 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2397 			goto nolock_notfound;
2398 		}
2399 
2400 		error = sdev_call_devfsadmd(ddv, dv, nm);
2401 		if (error == 0) {
2402 			sdcmn_err8(("lookup of %s/%s by %s: reconfig\n",
2403 			    ddv->sdev_name, nm, curproc->p_user.u_comm));
2404 			if (sdev_reconfig_verbose) {
2405 				cmn_err(CE_CONT,
2406 				    "?lookup of %s/%s by %s: reconfig\n",
2407 				    ddv->sdev_name, nm, curproc->p_user.u_comm);
2408 			}
2409 			retried = 1;
2410 			failed_flags |= SLF_REBUILT;
2411 			ASSERT(dv->sdev_state != SDEV_ZOMBIE);
2412 			SDEV_SIMPLE_RELE(dv);
2413 			goto tryagain;
2414 		} else {
2415 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2416 			goto nolock_notfound;
2417 		}
2418 	}
2419 
2420 	/*
2421 	 * (b2) Directory Based Name Resolution (DBNR):
2422 	 *	ddv	- parent
2423 	 *	nm	- /dev/(ddv->sdev_name)/nm
2424 	 *
2425 	 *	note: module vnode ops take precedence than the build-in ones
2426 	 */
2427 	if (fn) {
2428 		error = sdev_call_modulelookup(ddv, &dv, nm, fn, cred);
2429 		if (error) {
2430 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2431 			goto notfound;
2432 		} else {
2433 			goto found;
2434 		}
2435 	} else if (callback) {
2436 		error = sdev_call_dircallback(ddv, &dv, nm, callback,
2437 		    flags, cred);
2438 		if (error == 0) {
2439 			goto found;
2440 		} else {
2441 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2442 			goto notfound;
2443 		}
2444 	}
2445 	ASSERT(rvp);
2446 
2447 found:
2448 	ASSERT(!(dv->sdev_flags & SDEV_STALE));
2449 	ASSERT(dv->sdev_state == SDEV_READY);
2450 	if (vtor) {
2451 		/*
2452 		 * Check validity of returned node
2453 		 */
2454 		switch (vtor(dv)) {
2455 		case SDEV_VTOR_VALID:
2456 			break;
2457 		case SDEV_VTOR_STALE:
2458 			/*
2459 			 * The name exists, but the cache entry is
2460 			 * stale and needs to be re-created.
2461 			 */
2462 			ASSERT(RW_READ_HELD(&ddv->sdev_contents));
2463 			if (rw_tryupgrade(&ddv->sdev_contents) == 0) {
2464 				rw_exit(&ddv->sdev_contents);
2465 				rw_enter(&ddv->sdev_contents, RW_WRITER);
2466 			}
2467 			error = sdev_cache_update(ddv, &dv, nm,
2468 			    SDEV_CACHE_DELETE);
2469 			rw_downgrade(&ddv->sdev_contents);
2470 			if (error == 0) {
2471 				dv = NULL;
2472 				goto lookup_create_node;
2473 			}
2474 			/* FALLTHRU */
2475 		case SDEV_VTOR_INVALID:
2476 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2477 			sdcmn_err7(("lookup: destroy invalid "
2478 			    "node: %s(%p)\n", dv->sdev_name, (void *)dv));
2479 			goto nolock_notfound;
2480 		case SDEV_VTOR_SKIP:
2481 			sdcmn_err7(("lookup: node not applicable - "
2482 			    "skipping: %s(%p)\n", dv->sdev_name, (void *)dv));
2483 			rw_exit(&ddv->sdev_contents);
2484 			SD_TRACE_FAILED_LOOKUP(ddv, nm, retried);
2485 			SDEV_RELE(dv);
2486 			goto lookup_failed;
2487 		default:
2488 			cmn_err(CE_PANIC,
2489 			    "dev fs: validator failed: %s(%p)\n",
2490 			    dv->sdev_name, (void *)dv);
2491 			break;
2492 			/*NOTREACHED*/
2493 		}
2494 	}
2495 
2496 	if ((SDEVTOV(dv)->v_type == VDIR) && SDEV_IS_GLOBAL(dv)) {
2497 		rw_enter(&dv->sdev_contents, RW_READER);
2498 		(void) sdev_get_map(dv, 1);
2499 		rw_exit(&dv->sdev_contents);
2500 	}
2501 	rw_exit(&ddv->sdev_contents);
2502 	rv = sdev_to_vp(dv, vpp);
2503 	sdcmn_err3(("devname_lookup_func: returning vp %p v_count %d state %d "
2504 	    "for nm %s, error %d\n", (void *)*vpp, (*vpp)->v_count,
2505 	    dv->sdev_state, nm, rv));
2506 	return (rv);
2507 
2508 notfound:
2509 	mutex_enter(&dv->sdev_lookup_lock);
2510 	SDEV_UNBLOCK_OTHERS(dv, SDEV_LOOKUP);
2511 	mutex_exit(&dv->sdev_lookup_lock);
2512 nolock_notfound:
2513 	/*
2514 	 * Destroy the node that is created for synchronization purposes.
2515 	 */
2516 	sdcmn_err3(("devname_lookup_func: %s with state %d\n",
2517 	    nm, dv->sdev_state));
2518 	ASSERT(RW_READ_HELD(&ddv->sdev_contents));
2519 	if (dv->sdev_state == SDEV_INIT) {
2520 		if (!rw_tryupgrade(&ddv->sdev_contents)) {
2521 			rw_exit(&ddv->sdev_contents);
2522 			rw_enter(&ddv->sdev_contents, RW_WRITER);
2523 		}
2524 
2525 		/*
2526 		 * Node state may have changed during the lock
2527 		 * changes. Re-check.
2528 		 */
2529 		if (dv->sdev_state == SDEV_INIT) {
2530 			(void) sdev_dirdelete(ddv, dv);
2531 			rw_exit(&ddv->sdev_contents);
2532 			sdev_lookup_failed(ddv, nm, failed_flags);
2533 			*vpp = NULL;
2534 			return (ENOENT);
2535 		}
2536 	}
2537 
2538 	rw_exit(&ddv->sdev_contents);
2539 	SDEV_RELE(dv);
2540 
2541 lookup_failed:
2542 	sdev_lookup_failed(ddv, nm, failed_flags);
2543 	*vpp = NULL;
2544 	return (ENOENT);
2545 }
2546 
2547 /*
2548  * Given a directory node, mark all nodes beneath as
2549  * STALE, i.e. nodes that don't exist as far as new
2550  * consumers are concerned.  Remove them from the
2551  * list of directory entries so that no lookup or
2552  * directory traversal will find them.  The node
2553  * not deallocated so existing holds are not affected.
2554  */
2555 void
2556 sdev_stale(struct sdev_node *ddv)
2557 {
2558 	struct sdev_node *dv;
2559 	struct vnode *vp;
2560 
2561 	ASSERT(SDEVTOV(ddv)->v_type == VDIR);
2562 
2563 	rw_enter(&ddv->sdev_contents, RW_WRITER);
2564 	for (dv = SDEV_FIRST_ENTRY(ddv); dv; dv = SDEV_NEXT_ENTRY(ddv, dv)) {
2565 		vp = SDEVTOV(dv);
2566 		if (vp->v_type == VDIR)
2567 			sdev_stale(dv);
2568 
2569 		sdcmn_err9(("sdev_stale: setting stale %s\n",
2570 		    dv->sdev_path));
2571 		dv->sdev_flags |= SDEV_STALE;
2572 		avl_remove(&ddv->sdev_entries, dv);
2573 	}
2574 	ddv->sdev_flags |= SDEV_BUILD;
2575 	rw_exit(&ddv->sdev_contents);
2576 }
2577 
2578 /*
2579  * Given a directory node, clean out all the nodes beneath.
2580  * If expr is specified, clean node with names matching expr.
2581  * If SDEV_ENFORCE is specified in flags, busy nodes are made stale,
2582  *	so they are excluded from future lookups.
2583  */
2584 int
2585 sdev_cleandir(struct sdev_node *ddv, char *expr, uint_t flags)
2586 {
2587 	int error = 0;
2588 	int busy = 0;
2589 	struct vnode *vp;
2590 	struct sdev_node *dv, *next = NULL;
2591 	int bkstore = 0;
2592 	int len = 0;
2593 	char *bks_name = NULL;
2594 
2595 	ASSERT(SDEVTOV(ddv)->v_type == VDIR);
2596 
2597 	/*
2598 	 * We try our best to destroy all unused sdev_node's
2599 	 */
2600 	rw_enter(&ddv->sdev_contents, RW_WRITER);
2601 	for (dv = SDEV_FIRST_ENTRY(ddv); dv; dv = next) {
2602 		next = SDEV_NEXT_ENTRY(ddv, dv);
2603 		vp = SDEVTOV(dv);
2604 
2605 		if (expr && gmatch(dv->sdev_name, expr) == 0)
2606 			continue;
2607 
2608 		if (vp->v_type == VDIR &&
2609 		    sdev_cleandir(dv, NULL, flags) != 0) {
2610 			sdcmn_err9(("sdev_cleandir: dir %s busy\n",
2611 			    dv->sdev_name));
2612 			busy++;
2613 			continue;
2614 		}
2615 
2616 		if (vp->v_count > 0 && (flags & SDEV_ENFORCE) == 0) {
2617 			sdcmn_err9(("sdev_cleandir: dir %s busy\n",
2618 			    dv->sdev_name));
2619 			busy++;
2620 			continue;
2621 		}
2622 
2623 		/*
2624 		 * at this point, either dv is not held or SDEV_ENFORCE
2625 		 * is specified. In either case, dv needs to be deleted
2626 		 */
2627 		SDEV_HOLD(dv);
2628 
2629 		bkstore = SDEV_IS_PERSIST(dv) ? 1 : 0;
2630 		if (bkstore && (vp->v_type == VDIR))
2631 			bkstore += 1;
2632 
2633 		if (bkstore) {
2634 			len = strlen(dv->sdev_name) + 1;
2635 			bks_name = kmem_alloc(len, KM_SLEEP);
2636 			bcopy(dv->sdev_name, bks_name, len);
2637 		}
2638 
2639 		error = sdev_dirdelete(ddv, dv);
2640 
2641 		if (error == EBUSY) {
2642 			sdcmn_err9(("sdev_cleandir: dir busy\n"));
2643 			busy++;
2644 		}
2645 
2646 		/* take care the backing store clean up */
2647 		if (bkstore && (error == 0)) {
2648 			ASSERT(bks_name);
2649 			ASSERT(ddv->sdev_attrvp);
2650 
2651 			if (bkstore == 1) {
2652 				error = VOP_REMOVE(ddv->sdev_attrvp,
2653 				    bks_name, kcred, NULL, 0);
2654 			} else if (bkstore == 2) {
2655 				error = VOP_RMDIR(ddv->sdev_attrvp,
2656 				    bks_name, ddv->sdev_attrvp, kcred, NULL, 0);
2657 			}
2658 
2659 			/* do not propagate the backing store errors */
2660 			if (error) {
2661 				sdcmn_err9(("sdev_cleandir: backing store"
2662 				    "not cleaned\n"));
2663 				error = 0;
2664 			}
2665 
2666 			bkstore = 0;
2667 			kmem_free(bks_name, len);
2668 			bks_name = NULL;
2669 			len = 0;
2670 		}
2671 	}
2672 
2673 	ddv->sdev_flags |= SDEV_BUILD;
2674 	rw_exit(&ddv->sdev_contents);
2675 
2676 	if (busy) {
2677 		error = EBUSY;
2678 	}
2679 
2680 	return (error);
2681 }
2682 
2683 /*
2684  * a convenient wrapper for readdir() funcs
2685  */
2686 size_t
2687 add_dir_entry(dirent64_t *de, char *nm, size_t size, ino_t ino, offset_t off)
2688 {
2689 	size_t reclen = DIRENT64_RECLEN(strlen(nm));
2690 	if (reclen > size)
2691 		return (0);
2692 
2693 	de->d_ino = (ino64_t)ino;
2694 	de->d_off = (off64_t)off + 1;
2695 	de->d_reclen = (ushort_t)reclen;
2696 	(void) strncpy(de->d_name, nm, DIRENT64_NAMELEN(reclen));
2697 	return (reclen);
2698 }
2699 
2700 /*
2701  * sdev_mount service routines
2702  */
2703 int
2704 sdev_copyin_mountargs(struct mounta *uap, struct sdev_mountargs *args)
2705 {
2706 	int	error;
2707 
2708 	if (uap->datalen != sizeof (*args))
2709 		return (EINVAL);
2710 
2711 	if (error = copyin(uap->dataptr, args, sizeof (*args))) {
2712 		cmn_err(CE_WARN, "sdev_copyin_mountargs: can not"
2713 		    "get user data. error %d\n", error);
2714 		return (EFAULT);
2715 	}
2716 
2717 	return (0);
2718 }
2719 
2720 #ifdef nextdp
2721 #undef nextdp
2722 #endif
2723 #define	nextdp(dp)	((struct dirent64 *) \
2724 			    (intptr_t)((char *)(dp) + (dp)->d_reclen))
2725 
2726 /*
2727  * readdir helper func
2728  */
2729 int
2730 devname_readdir_func(vnode_t *vp, uio_t *uiop, cred_t *cred, int *eofp,
2731     int flags)
2732 {
2733 	struct sdev_node *ddv = VTOSDEV(vp);
2734 	struct sdev_node *dv;
2735 	dirent64_t	*dp;
2736 	ulong_t		outcount = 0;
2737 	size_t		namelen;
2738 	ulong_t		alloc_count;
2739 	void		*outbuf;
2740 	struct iovec	*iovp;
2741 	int		error = 0;
2742 	size_t		reclen;
2743 	offset_t	diroff;
2744 	offset_t	soff;
2745 	int		this_reclen;
2746 	struct devname_nsmap	*map = NULL;
2747 	struct devname_ops	*dirops = NULL;
2748 	int (*fn)(devname_handle_t *, struct cred *) = NULL;
2749 	int (*vtor)(struct sdev_node *) = NULL;
2750 	struct vattr attr;
2751 	timestruc_t now;
2752 
2753 	ASSERT(ddv->sdev_attr || ddv->sdev_attrvp);
2754 	ASSERT(RW_READ_HELD(&ddv->sdev_contents));
2755 
2756 	if (uiop->uio_loffset >= MAXOFF_T) {
2757 		if (eofp)
2758 			*eofp = 1;
2759 		return (0);
2760 	}
2761 
2762 	if (uiop->uio_iovcnt != 1)
2763 		return (EINVAL);
2764 
2765 	if (vp->v_type != VDIR)
2766 		return (ENOTDIR);
2767 
2768 	if (ddv->sdev_flags & SDEV_VTOR) {
2769 		vtor = (int (*)(struct sdev_node *))sdev_get_vtor(ddv);
2770 		ASSERT(vtor);
2771 	}
2772 
2773 	if (eofp != NULL)
2774 		*eofp = 0;
2775 
2776 	soff = uiop->uio_loffset;
2777 	iovp = uiop->uio_iov;
2778 	alloc_count = iovp->iov_len;
2779 	dp = outbuf = kmem_alloc(alloc_count, KM_SLEEP);
2780 	outcount = 0;
2781 
2782 	if (ddv->sdev_state == SDEV_ZOMBIE)
2783 		goto get_cache;
2784 
2785 	if (SDEV_IS_GLOBAL(ddv)) {
2786 		map = sdev_get_map(ddv, 0);
2787 		dirops = map ? map->dir_ops : NULL;
2788 		fn = dirops ? dirops->devnops_readdir : NULL;
2789 
2790 		if (map && map->dir_map) {
2791 			/*
2792 			 * load the name mapping rule database
2793 			 * through invoking devfsadm and symlink
2794 			 * all the entries in the map
2795 			 */
2796 			devname_rdr_result_t rdr_result;
2797 			int do_thread = 0;
2798 
2799 			rw_enter(&map->dir_lock, RW_READER);
2800 			do_thread = map->dir_maploaded ? 0 : 1;
2801 			rw_exit(&map->dir_lock);
2802 
2803 			if (do_thread) {
2804 				mutex_enter(&ddv->sdev_lookup_lock);
2805 				SDEV_BLOCK_OTHERS(ddv, SDEV_READDIR);
2806 				mutex_exit(&ddv->sdev_lookup_lock);
2807 
2808 				sdev_dispatch_to_nsrdr_thread(ddv,
2809 				    map->dir_map, &rdr_result);
2810 			}
2811 		} else if ((sdev_boot_state == SDEV_BOOT_STATE_COMPLETE) &&
2812 		    !sdev_reconfig_boot && (flags & SDEV_BROWSE) &&
2813 		    !SDEV_IS_DYNAMIC(ddv) && !SDEV_IS_NO_NCACHE(ddv) &&
2814 		    ((moddebug & MODDEBUG_FINI_EBUSY) == 0) &&
2815 		    !DEVNAME_DEVFSADM_HAS_RUN(devfsadm_state) &&
2816 		    !DEVNAME_DEVFSADM_IS_RUNNING(devfsadm_state) &&
2817 		    !sdev_reconfig_disable) {
2818 			/*
2819 			 * invoking "devfsadm" to do system device reconfig
2820 			 */
2821 			mutex_enter(&ddv->sdev_lookup_lock);
2822 			SDEV_BLOCK_OTHERS(ddv,
2823 			    (SDEV_READDIR|SDEV_LGWAITING));
2824 			mutex_exit(&ddv->sdev_lookup_lock);
2825 
2826 			sdcmn_err8(("readdir of %s by %s: reconfig\n",
2827 			    ddv->sdev_path, curproc->p_user.u_comm));
2828 			if (sdev_reconfig_verbose) {
2829 				cmn_err(CE_CONT,
2830 				    "?readdir of %s by %s: reconfig\n",
2831 				    ddv->sdev_path, curproc->p_user.u_comm);
2832 			}
2833 
2834 			sdev_devfsadmd_thread(ddv, NULL, kcred);
2835 		} else if (DEVNAME_DEVFSADM_IS_RUNNING(devfsadm_state)) {
2836 			/*
2837 			 * compensate the "ls" started later than "devfsadm"
2838 			 */
2839 			mutex_enter(&ddv->sdev_lookup_lock);
2840 			SDEV_BLOCK_OTHERS(ddv, (SDEV_READDIR|SDEV_LGWAITING));
2841 			mutex_exit(&ddv->sdev_lookup_lock);
2842 		}
2843 
2844 		/*
2845 		 * release the contents lock so that
2846 		 * the cache may be updated by devfsadmd
2847 		 */
2848 		rw_exit(&ddv->sdev_contents);
2849 		mutex_enter(&ddv->sdev_lookup_lock);
2850 		if (SDEV_IS_READDIR(ddv))
2851 			(void) sdev_wait4lookup(ddv, SDEV_READDIR);
2852 		mutex_exit(&ddv->sdev_lookup_lock);
2853 		rw_enter(&ddv->sdev_contents, RW_READER);
2854 
2855 		sdcmn_err4(("readdir of directory %s by %s\n",
2856 		    ddv->sdev_name, curproc->p_user.u_comm));
2857 		if (ddv->sdev_flags & SDEV_BUILD) {
2858 			if (SDEV_IS_PERSIST(ddv)) {
2859 				error = sdev_filldir_from_store(ddv,
2860 				    alloc_count, cred);
2861 			}
2862 			ddv->sdev_flags &= ~SDEV_BUILD;
2863 		}
2864 	}
2865 
2866 get_cache:
2867 	/* handle "." and ".." */
2868 	diroff = 0;
2869 	if (soff == 0) {
2870 		/* first time */
2871 		this_reclen = DIRENT64_RECLEN(1);
2872 		if (alloc_count < this_reclen) {
2873 			error = EINVAL;
2874 			goto done;
2875 		}
2876 
2877 		dp->d_ino = (ino64_t)ddv->sdev_ino;
2878 		dp->d_off = (off64_t)1;
2879 		dp->d_reclen = (ushort_t)this_reclen;
2880 
2881 		(void) strncpy(dp->d_name, ".",
2882 		    DIRENT64_NAMELEN(this_reclen));
2883 		outcount += dp->d_reclen;
2884 		dp = nextdp(dp);
2885 	}
2886 
2887 	diroff++;
2888 	if (soff <= 1) {
2889 		this_reclen = DIRENT64_RECLEN(2);
2890 		if (alloc_count < outcount + this_reclen) {
2891 			error = EINVAL;
2892 			goto done;
2893 		}
2894 
2895 		dp->d_reclen = (ushort_t)this_reclen;
2896 		dp->d_ino = (ino64_t)ddv->sdev_dotdot->sdev_ino;
2897 		dp->d_off = (off64_t)2;
2898 
2899 		(void) strncpy(dp->d_name, "..",
2900 		    DIRENT64_NAMELEN(this_reclen));
2901 		outcount += dp->d_reclen;
2902 
2903 		dp = nextdp(dp);
2904 	}
2905 
2906 
2907 	/* gets the cache */
2908 	diroff++;
2909 	for (dv = SDEV_FIRST_ENTRY(ddv); dv;
2910 	    dv = SDEV_NEXT_ENTRY(ddv, dv), diroff++) {
2911 		sdcmn_err3(("sdev_readdir: diroff %lld soff %lld for '%s' \n",
2912 		    diroff, soff, dv->sdev_name));
2913 
2914 		/* bypassing pre-matured nodes */
2915 		if (diroff < soff || (dv->sdev_state != SDEV_READY)) {
2916 			sdcmn_err3(("sdev_readdir: pre-mature node  "
2917 			    "%s\n", dv->sdev_name));
2918 			continue;
2919 		}
2920 
2921 		/*
2922 		 * Check validity of node
2923 		 */
2924 		if (vtor) {
2925 			switch (vtor(dv)) {
2926 			case SDEV_VTOR_VALID:
2927 				break;
2928 			case SDEV_VTOR_INVALID:
2929 			case SDEV_VTOR_SKIP:
2930 				continue;
2931 			default:
2932 				cmn_err(CE_PANIC,
2933 				    "dev fs: validator failed: %s(%p)\n",
2934 				    dv->sdev_name, (void *)dv);
2935 				break;
2936 			/*NOTREACHED*/
2937 			}
2938 		}
2939 
2940 		/*
2941 		 * call back into the module for the validity/bookkeeping
2942 		 * of this entry
2943 		 */
2944 		if (fn) {
2945 			error = (*fn)(&(dv->sdev_handle), cred);
2946 			if (error) {
2947 				sdcmn_err4(("sdev_readdir: module did not "
2948 				    "validate %s\n", dv->sdev_name));
2949 				continue;
2950 			}
2951 		}
2952 
2953 		namelen = strlen(dv->sdev_name);
2954 		reclen = DIRENT64_RECLEN(namelen);
2955 		if (outcount + reclen > alloc_count) {
2956 			goto full;
2957 		}
2958 		dp->d_reclen = (ushort_t)reclen;
2959 		dp->d_ino = (ino64_t)dv->sdev_ino;
2960 		dp->d_off = (off64_t)diroff + 1;
2961 		(void) strncpy(dp->d_name, dv->sdev_name,
2962 		    DIRENT64_NAMELEN(reclen));
2963 		outcount += reclen;
2964 		dp = nextdp(dp);
2965 	}
2966 
2967 full:
2968 	sdcmn_err4(("sdev_readdir: moving %lu bytes: "
2969 	    "diroff %lld, soff %lld, dv %p\n", outcount, diroff, soff,
2970 	    (void *)dv));
2971 
2972 	if (outcount)
2973 		error = uiomove(outbuf, outcount, UIO_READ, uiop);
2974 
2975 	if (!error) {
2976 		uiop->uio_loffset = diroff;
2977 		if (eofp)
2978 			*eofp = dv ? 0 : 1;
2979 	}
2980 
2981 
2982 	if (ddv->sdev_attrvp) {
2983 		gethrestime(&now);
2984 		attr.va_ctime = now;
2985 		attr.va_atime = now;
2986 		attr.va_mask = AT_CTIME|AT_ATIME;
2987 
2988 		(void) VOP_SETATTR(ddv->sdev_attrvp, &attr, 0, kcred, NULL);
2989 	}
2990 done:
2991 	kmem_free(outbuf, alloc_count);
2992 	return (error);
2993 }
2994 
2995 static int
2996 sdev_modctl_lookup(const char *path, vnode_t **r_vp)
2997 {
2998 	vnode_t *vp;
2999 	vnode_t *cvp;
3000 	struct sdev_node *svp;
3001 	char *nm;
3002 	struct pathname pn;
3003 	int error;
3004 	int persisted = 0;
3005 
3006 	ASSERT(INGLOBALZONE(curproc));
3007 
3008 	if (error = pn_get((char *)path, UIO_SYSSPACE, &pn))
3009 		return (error);
3010 	nm = kmem_alloc(MAXNAMELEN, KM_SLEEP);
3011 
3012 	vp = rootdir;
3013 	VN_HOLD(vp);
3014 
3015 	while (pn_pathleft(&pn)) {
3016 		ASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
3017 		(void) pn_getcomponent(&pn, nm);
3018 
3019 		/*
3020 		 * Deal with the .. special case where we may be
3021 		 * traversing up across a mount point, to the
3022 		 * root of this filesystem or global root.
3023 		 */
3024 		if (nm[0] == '.' && nm[1] == '.' && nm[2] == 0) {
3025 checkforroot:
3026 			if (VN_CMP(vp, rootdir)) {
3027 				nm[1] = 0;
3028 			} else if (vp->v_flag & VROOT) {
3029 				vfs_t *vfsp;
3030 				cvp = vp;
3031 				vfsp = cvp->v_vfsp;
3032 				vfs_rlock_wait(vfsp);
3033 				vp = cvp->v_vfsp->vfs_vnodecovered;
3034 				if (vp == NULL ||
3035 				    (cvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)) {
3036 					vfs_unlock(vfsp);
3037 					VN_RELE(cvp);
3038 					error = EIO;
3039 					break;
3040 				}
3041 				VN_HOLD(vp);
3042 				vfs_unlock(vfsp);
3043 				VN_RELE(cvp);
3044 				cvp = NULL;
3045 				goto checkforroot;
3046 			}
3047 		}
3048 
3049 		error = VOP_LOOKUP(vp, nm, &cvp, NULL, 0, NULL, kcred, NULL,
3050 		    NULL, NULL);
3051 		if (error) {
3052 			VN_RELE(vp);
3053 			break;
3054 		}
3055 
3056 		/* traverse mount points encountered on our journey */
3057 		if (vn_ismntpt(cvp) && (error = traverse(&cvp)) != 0) {
3058 			VN_RELE(vp);
3059 			VN_RELE(cvp);
3060 			break;
3061 		}
3062 
3063 		/*
3064 		 * symbolic link, can be either relative and absolute
3065 		 */
3066 		if ((cvp->v_type == VLNK) && pn_pathleft(&pn)) {
3067 			struct pathname linkpath;
3068 			pn_alloc(&linkpath);
3069 			if (error = pn_getsymlink(cvp, &linkpath, kcred)) {
3070 				pn_free(&linkpath);
3071 				break;
3072 			}
3073 			if (pn_pathleft(&linkpath) == 0)
3074 				(void) pn_set(&linkpath, ".");
3075 			error = pn_insert(&pn, &linkpath, strlen(nm));
3076 			pn_free(&linkpath);
3077 			if (pn.pn_pathlen == 0) {
3078 				VN_RELE(vp);
3079 				return (ENOENT);
3080 			}
3081 			if (pn.pn_path[0] == '/') {
3082 				pn_skipslash(&pn);
3083 				VN_RELE(vp);
3084 				VN_RELE(cvp);
3085 				vp = rootdir;
3086 				VN_HOLD(vp);
3087 			} else {
3088 				VN_RELE(cvp);
3089 			}
3090 			continue;
3091 		}
3092 
3093 		VN_RELE(vp);
3094 
3095 		/*
3096 		 * Direct the operation to the persisting filesystem
3097 		 * underlying /dev.  Bail if we encounter a
3098 		 * non-persistent dev entity here.
3099 		 */
3100 		if (cvp->v_vfsp->vfs_fstype == devtype) {
3101 
3102 			if ((VTOSDEV(cvp)->sdev_flags & SDEV_PERSIST) == 0) {
3103 				error = ENOENT;
3104 				VN_RELE(cvp);
3105 				break;
3106 			}
3107 
3108 			if (VTOSDEV(cvp) == NULL) {
3109 				error = ENOENT;
3110 				VN_RELE(cvp);
3111 				break;
3112 			}
3113 			svp = VTOSDEV(cvp);
3114 			if ((vp = svp->sdev_attrvp) == NULL) {
3115 				error = ENOENT;
3116 				VN_RELE(cvp);
3117 				break;
3118 			}
3119 			persisted = 1;
3120 			VN_HOLD(vp);
3121 			VN_RELE(cvp);
3122 			cvp = vp;
3123 		}
3124 
3125 		vp = cvp;
3126 		pn_skipslash(&pn);
3127 	}
3128 
3129 	kmem_free(nm, MAXNAMELEN);
3130 	pn_free(&pn);
3131 
3132 	if (error)
3133 		return (error);
3134 
3135 	/*
3136 	 * Only return persisted nodes in the filesystem underlying /dev.
3137 	 */
3138 	if (!persisted) {
3139 		VN_RELE(vp);
3140 		return (ENOENT);
3141 	}
3142 
3143 	*r_vp = vp;
3144 	return (0);
3145 }
3146 
/*
 * Read the persisted backing directory for `dir' and return the entry
 * names (excluding "." and "..") in a kmem-allocated, NULL-terminated
 * string array.  *npathsp is the entry count, *npathsp_alloc the array
 * capacity; free both with sdev_modctl_readdir_free().  When
 * checking_empty is set, stop after the first entry found.
 */
int
sdev_modctl_readdir(const char *dir, char ***dirlistp,
	int *npathsp, int *npathsp_alloc, int checking_empty)
{
	char	**pathlist = NULL;
	char	**newlist = NULL;
	int	npaths = 0;
	int	npaths_alloc = 0;
	dirent64_t *dbuf = NULL;
	int	n;
	char	*s;
	int error;
	vnode_t *vp;
	int eof;
	struct iovec iov;
	struct uio uio;
	struct dirent64 *dp;
	size_t dlen;
	size_t dbuflen;
	int ndirents = 64;
	char *nm;

	error = sdev_modctl_lookup(dir, &vp);
	sdcmn_err11(("modctl readdir: %s by %s: %s\n",
	    dir, curproc->p_user.u_comm,
	    (error == 0) ? "ok" : "failed"));
	if (error)
		return (error);

	/* scratch buffer for batches of raw dirent64 records */
	dlen = ndirents * (sizeof (*dbuf));
	dbuf = kmem_alloc(dlen, KM_SLEEP);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_fmode = 0;
	uio.uio_extflg = UIO_COPY_CACHED;
	uio.uio_loffset = 0;
	uio.uio_llimit = MAXOFFSET_T;

	eof = 0;
	error = 0;
	while (!error && !eof) {
		/* reset the uio for the next VOP_READDIR batch */
		uio.uio_resid = dlen;
		iov.iov_base = (char *)dbuf;
		iov.iov_len = dlen;

		(void) VOP_RWLOCK(vp, V_WRITELOCK_FALSE, NULL);
		error = VOP_READDIR(vp, &uio, kcred, &eof, NULL, 0);
		VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, NULL);

		dbuflen = dlen - uio.uio_resid;

		if (error || dbuflen == 0)
			break;

		/* walk the variable-length records in this batch */
		for (dp = dbuf; ((intptr_t)dp < (intptr_t)dbuf + dbuflen);
		    dp = (dirent64_t *)((intptr_t)dp + dp->d_reclen)) {

			nm = dp->d_name;

			if (strcmp(nm, ".") == 0 || strcmp(nm, "..") == 0)
				continue;
			/* grow the result array in chunks of 64 slots */
			if (npaths == npaths_alloc) {
				npaths_alloc += 64;
				newlist = (char **)
				    kmem_zalloc((npaths_alloc + 1) *
				    sizeof (char *), KM_SLEEP);
				if (pathlist) {
					bcopy(pathlist, newlist,
					    npaths * sizeof (char *));
					kmem_free(pathlist,
					    (npaths + 1) * sizeof (char *));
				}
				pathlist = newlist;
			}
			n = strlen(nm) + 1;
			s = kmem_alloc(n, KM_SLEEP);
			bcopy(nm, s, n);
			pathlist[npaths++] = s;
			sdcmn_err11(("  %s/%s\n", dir, s));

			/* if checking empty, one entry is as good as many */
			if (checking_empty) {
				eof = 1;
				break;
			}
		}
	}

/* NOTE(review): this label is currently unreferenced */
exit:
	VN_RELE(vp);

	if (dbuf)
		kmem_free(dbuf, dlen);

	if (error)
		return (error);

	*dirlistp = pathlist;
	*npathsp = npaths;
	*npathsp_alloc = npaths_alloc;

	return (0);
}
3252 
/*
 * Release a path list produced by sdev_modctl_readdir():
 * each string, then the (npaths_alloc + 1)-slot pointer array.
 */
void
sdev_modctl_readdir_free(char **pathlist, int npaths, int npaths_alloc)
{
	int	idx;

	for (idx = 0; idx < npaths; idx++)
		kmem_free(pathlist[idx], strlen(pathlist[idx]) + 1);

	kmem_free(pathlist, (npaths_alloc + 1) * sizeof (char *));
}
3265 
3266 int
3267 sdev_modctl_devexists(const char *path)
3268 {
3269 	vnode_t *vp;
3270 	int error;
3271 
3272 	error = sdev_modctl_lookup(path, &vp);
3273 	sdcmn_err11(("modctl dev exists: %s by %s: %s\n",
3274 	    path, curproc->p_user.u_comm,
3275 	    (error == 0) ? "ok" : "failed"));
3276 	if (error == 0)
3277 		VN_RELE(vp);
3278 
3279 	return (error);
3280 }
3281 
3282 void
3283 sdev_update_newnsmap(struct devname_nsmap *map, char *module, char *mapname)
3284 {
3285 	rw_enter(&map->dir_lock, RW_WRITER);
3286 	if (module) {
3287 		ASSERT(map->dir_newmodule == NULL);
3288 		map->dir_newmodule = i_ddi_strdup(module, KM_SLEEP);
3289 	}
3290 	if (mapname) {
3291 		ASSERT(map->dir_newmap == NULL);
3292 		map->dir_newmap = i_ddi_strdup(mapname, KM_SLEEP);
3293 	}
3294 
3295 	map->dir_invalid = 1;
3296 	rw_exit(&map->dir_lock);
3297 }
3298 
/*
 * Install new module/map names on an nsmap entry, replacing the old
 * ones, and clear the invalid/loaded state.  Caller holds dir_lock
 * (reader or writer); the lock is returned held as reader.
 */
void
sdev_replace_nsmap(struct devname_nsmap *map, char *module, char *mapname)
{
	char *old_module = NULL;
	char *old_map = NULL;

	ASSERT(RW_LOCK_HELD(&map->dir_lock));
	/* need the writer lock to swap the name pointers */
	if (!rw_tryupgrade(&map->dir_lock)) {
		rw_exit(&map->dir_lock);
		rw_enter(&map->dir_lock, RW_WRITER);
	}

	old_module = map->dir_module;
	if (module) {
		/*
		 * NOTE(review): the old string is freed only when its
		 * content differs from the new one; if `module' is a
		 * separate allocation with identical content, the old
		 * pointer appears to leak — confirm ownership contract
		 * with callers.
		 */
		if (old_module && strcmp(old_module, module) != 0) {
			kmem_free(old_module, strlen(old_module) + 1);
		}
		/* takes ownership of `module'; clears the staged name */
		map->dir_module = module;
		map->dir_newmodule = NULL;
	}

	old_map = map->dir_map;
	if (mapname) {
		/* same free-on-mismatch pattern as dir_module above */
		if (old_map && strcmp(old_map, mapname) != 0) {
			kmem_free(old_map, strlen(old_map) + 1);
		}

		map->dir_map = mapname;
		map->dir_newmap = NULL;
	}
	/* force a reload of the map contents on next use */
	map->dir_maploaded = 0;
	map->dir_invalid = 0;
	rw_downgrade(&map->dir_lock);
}
3333 
3334 /*
3335  * dir_name should have at least one attribute,
3336  *	dir_module
3337  *	or dir_map
3338  *	or both
3339  * caller holds the devname_nsmaps_lock
3340  */
3341 void
3342 sdev_insert_nsmap(char *dir_name, char *dir_module, char *dir_map)
3343 {
3344 	struct devname_nsmap *map;
3345 	int len = 0;
3346 
3347 	ASSERT(dir_name);
3348 	ASSERT(dir_module || dir_map);
3349 	ASSERT(MUTEX_HELD(&devname_nsmaps_lock));
3350 
3351 	if (map = sdev_get_nsmap_by_dir(dir_name, 1)) {
3352 		sdev_update_newnsmap(map, dir_module, dir_map);
3353 		return;
3354 	}
3355 
3356 	map = (struct devname_nsmap *)kmem_zalloc(sizeof (*map), KM_SLEEP);
3357 	map->dir_name = i_ddi_strdup(dir_name, KM_SLEEP);
3358 	if (dir_module) {
3359 		map->dir_module = i_ddi_strdup(dir_module, KM_SLEEP);
3360 	}
3361 
3362 	if (dir_map) {
3363 		if (dir_map[0] != '/') {
3364 			len = strlen(ETC_DEV_DIR) + strlen(dir_map) + 2;
3365 			map->dir_map = kmem_zalloc(len, KM_SLEEP);
3366 			(void) snprintf(map->dir_map, len, "%s/%s", ETC_DEV_DIR,
3367 			    dir_map);
3368 		} else {
3369 			map->dir_map = i_ddi_strdup(dir_map, KM_SLEEP);
3370 		}
3371 	}
3372 
3373 	map->dir_ops = NULL;
3374 	map->dir_maploaded = 0;
3375 	map->dir_invalid = 0;
3376 	rw_init(&map->dir_lock, NULL, RW_DEFAULT, NULL);
3377 
3378 	map->next = devname_nsmaps;
3379 	map->prev = NULL;
3380 	if (devname_nsmaps) {
3381 		devname_nsmaps->prev = map;
3382 	}
3383 	devname_nsmaps = map;
3384 }
3385 
3386 struct devname_nsmap *
3387 sdev_get_nsmap_by_dir(char *dir_path, int locked)
3388 {
3389 	struct devname_nsmap *map = NULL;
3390 
3391 	if (!locked)
3392 		mutex_enter(&devname_nsmaps_lock);
3393 	for (map = devname_nsmaps; map; map = map->next) {
3394 		sdcmn_err6(("sdev_get_nsmap_by_dir: dir %s\n", map->dir_name));
3395 		if (strcmp(map->dir_name, dir_path) == 0) {
3396 			if (!locked)
3397 				mutex_exit(&devname_nsmaps_lock);
3398 			return (map);
3399 		}
3400 	}
3401 	if (!locked)
3402 		mutex_exit(&devname_nsmaps_lock);
3403 	return (NULL);
3404 }
3405 
3406 struct devname_nsmap *
3407 sdev_get_nsmap_by_module(char *mod_name)
3408 {
3409 	struct devname_nsmap *map = NULL;
3410 
3411 	mutex_enter(&devname_nsmaps_lock);
3412 	for (map = devname_nsmaps; map; map = map->next) {
3413 		sdcmn_err7(("sdev_get_nsmap_by_module: module %s\n",
3414 		    map->dir_module));
3415 		if (map->dir_module && strcmp(map->dir_module, mod_name) == 0) {
3416 			mutex_exit(&devname_nsmaps_lock);
3417 			return (map);
3418 		}
3419 	}
3420 	mutex_exit(&devname_nsmaps_lock);
3421 	return (NULL);
3422 }
3423 
3424 void
3425 sdev_invalidate_nsmaps()
3426 {
3427 	struct devname_nsmap *map = NULL;
3428 
3429 	ASSERT(MUTEX_HELD(&devname_nsmaps_lock));
3430 
3431 	if (devname_nsmaps == NULL)
3432 		return;
3433 
3434 	for (map = devname_nsmaps; map; map = map->next) {
3435 		rw_enter(&map->dir_lock, RW_WRITER);
3436 		map->dir_invalid = 1;
3437 		rw_exit(&map->dir_lock);
3438 	}
3439 	devname_nsmaps_invalidated = 1;
3440 }
3441 
3442 
3443 int
3444 sdev_nsmaps_loaded()
3445 {
3446 	int ret = 0;
3447 
3448 	mutex_enter(&devname_nsmaps_lock);
3449 	if (devname_nsmaps_loaded)
3450 		ret = 1;
3451 
3452 	mutex_exit(&devname_nsmaps_lock);
3453 	return (ret);
3454 }
3455 
3456 int
3457 sdev_nsmaps_reloaded()
3458 {
3459 	int ret = 0;
3460 
3461 	mutex_enter(&devname_nsmaps_lock);
3462 	if (devname_nsmaps_invalidated)
3463 		ret = 1;
3464 
3465 	mutex_exit(&devname_nsmaps_lock);
3466 	return (ret);
3467 }
3468 
3469 static void
3470 sdev_free_nsmap(struct devname_nsmap *map)
3471 {
3472 	ASSERT(map);
3473 	if (map->dir_name)
3474 		kmem_free(map->dir_name, strlen(map->dir_name) + 1);
3475 	if (map->dir_module)
3476 		kmem_free(map->dir_module, strlen(map->dir_module) + 1);
3477 	if (map->dir_map)
3478 		kmem_free(map->dir_map, strlen(map->dir_map) + 1);
3479 	rw_destroy(&map->dir_lock);
3480 	kmem_free(map, sizeof (*map));
3481 }
3482 
/*
 * Walk the global namespace map list and free every entry that has been
 * invalidated and has no pending replacement (no new module and no new
 * map), unlinking it from the doubly-linked list as we go.  Finally
 * clear the global invalidation flag.  Caller must hold
 * devname_nsmaps_lock, which is what makes the list surgery below safe
 * even though each entry's dir_lock is dropped before the unlink.
 */
void
sdev_validate_nsmaps()
{
	struct devname_nsmap *map = NULL;
	struct devname_nsmap *oldmap = NULL;

	ASSERT(MUTEX_HELD(&devname_nsmaps_lock));
	map = devname_nsmaps;
	while (map) {
		rw_enter(&map->dir_lock, RW_READER);
		if ((map->dir_invalid == 1) && (map->dir_newmodule == NULL) &&
		    (map->dir_newmap == NULL)) {
			/* stale entry with nothing pending: unlink and free */
			oldmap = map;
			rw_exit(&map->dir_lock);
			if (map->prev)
				map->prev->next = oldmap->next;
			if (map == devname_nsmaps)
				devname_nsmaps = oldmap->next;

			/* advance to the successor before freeing oldmap */
			map = oldmap->next;
			if (map)
				map->prev = oldmap->prev;
			sdev_free_nsmap(oldmap);
			oldmap = NULL;
		} else {
			rw_exit(&map->dir_lock);
			map = map->next;
		}
	}
	devname_nsmaps_invalidated = 0;
}
3514 
3515 static int
3516 sdev_map_is_invalid(struct devname_nsmap *map)
3517 {
3518 	int ret = 0;
3519 
3520 	ASSERT(map);
3521 	rw_enter(&map->dir_lock, RW_READER);
3522 	if (map->dir_invalid)
3523 		ret = 1;
3524 	rw_exit(&map->dir_lock);
3525 	return (ret);
3526 }
3527 
3528 static int
3529 sdev_check_map(struct devname_nsmap *map)
3530 {
3531 	struct devname_nsmap *mapp;
3532 
3533 	mutex_enter(&devname_nsmaps_lock);
3534 	if (devname_nsmaps == NULL) {
3535 		mutex_exit(&devname_nsmaps_lock);
3536 		return (1);
3537 	}
3538 
3539 	for (mapp = devname_nsmaps; mapp; mapp = mapp->next) {
3540 		if (mapp == map) {
3541 			mutex_exit(&devname_nsmaps_lock);
3542 			return (0);
3543 		}
3544 	}
3545 
3546 	mutex_exit(&devname_nsmaps_lock);
3547 	return (1);
3548 
3549 }
3550 
/*
 * Return the namespace map associated with directory node dv, or NULL.
 *
 * Entered and exited with dv->sdev_contents held as READER.  When the
 * cached map pointer must be cleared or refreshed, the lock is upgraded
 * to WRITER — via rw_tryupgrade(), or by dropping and re-acquiring it
 * when the upgrade cannot be granted — and then downgraded again.
 * NOTE(review): on the drop/re-acquire path another thread may run in
 * between, so dv->sdev_mapinfo can change across that window.
 */
struct devname_nsmap *
sdev_get_map(struct sdev_node *dv, int validate)
{
	struct devname_nsmap *map;
	int error;

	ASSERT(RW_READ_HELD(&dv->sdev_contents));
	map = dv->sdev_mapinfo;
	/* cached map no longer on the global list: discard stale pointer */
	if (map && sdev_check_map(map)) {
		if (!rw_tryupgrade(&dv->sdev_contents)) {
			rw_exit(&dv->sdev_contents);
			rw_enter(&dv->sdev_contents, RW_WRITER);
		}
		dv->sdev_mapinfo = NULL;
		rw_downgrade(&dv->sdev_contents);
		return (NULL);
	}

	/* on request, (re)load module ops for a missing or invalid map */
	if (validate && (!map || (map && sdev_map_is_invalid(map)))) {
		if (!rw_tryupgrade(&dv->sdev_contents)) {
			rw_exit(&dv->sdev_contents);
			rw_enter(&dv->sdev_contents, RW_WRITER);
		}
		error = sdev_get_moduleops(dv);
		if (!error)
			map = dv->sdev_mapinfo;
		rw_downgrade(&dv->sdev_contents);
	}
	return (map);
}
3581 
3582 extern int sdev_vnodeops_tbl_size;
3583 
3584 /*
3585  * construct a new template with overrides from vtab
3586  */
3587 static fs_operation_def_t *
3588 sdev_merge_vtab(const fs_operation_def_t tab[])
3589 {
3590 	fs_operation_def_t *new;
3591 	const fs_operation_def_t *tab_entry;
3592 
3593 	/* make a copy of standard vnode ops table */
3594 	new = kmem_alloc(sdev_vnodeops_tbl_size, KM_SLEEP);
3595 	bcopy((void *)sdev_vnodeops_tbl, new, sdev_vnodeops_tbl_size);
3596 
3597 	/* replace the overrides from tab */
3598 	for (tab_entry = tab; tab_entry->name != NULL; tab_entry++) {
3599 		fs_operation_def_t *std_entry = new;
3600 		while (std_entry->name) {
3601 			if (strcmp(tab_entry->name, std_entry->name) == 0) {
3602 				std_entry->func = tab_entry->func;
3603 				break;
3604 			}
3605 			std_entry++;
3606 		}
3607 		if (std_entry->name == NULL)
3608 			cmn_err(CE_NOTE, "sdev_merge_vtab: entry %s unused.",
3609 			    tab_entry->name);
3610 	}
3611 
3612 	return (new);
3613 }
3614 
3615 /* free memory allocated by sdev_merge_vtab */
3616 static void
3617 sdev_free_vtab(fs_operation_def_t *new)
3618 {
3619 	kmem_free(new, sdev_vnodeops_tbl_size);
3620 }
3621 
3622 void
3623 devname_get_vnode(devname_handle_t *hdl, vnode_t **vpp)
3624 {
3625 	struct sdev_node *dv = hdl->dh_data;
3626 
3627 	ASSERT(dv);
3628 
3629 	rw_enter(&dv->sdev_contents, RW_READER);
3630 	*vpp = SDEVTOV(dv);
3631 	rw_exit(&dv->sdev_contents);
3632 }
3633 
3634 int
3635 devname_get_path(devname_handle_t *hdl, char **path)
3636 {
3637 	struct sdev_node *dv = hdl->dh_data;
3638 
3639 	ASSERT(dv);
3640 
3641 	rw_enter(&dv->sdev_contents, RW_READER);
3642 	*path = dv->sdev_path;
3643 	rw_exit(&dv->sdev_contents);
3644 	return (0);
3645 }
3646 
3647 int
3648 devname_get_name(devname_handle_t *hdl, char **entry)
3649 {
3650 	struct sdev_node *dv = hdl->dh_data;
3651 
3652 	ASSERT(dv);
3653 	rw_enter(&dv->sdev_contents, RW_READER);
3654 	*entry = dv->sdev_name;
3655 	rw_exit(&dv->sdev_contents);
3656 	return (0);
3657 }
3658 
3659 void
3660 devname_get_dir_vnode(devname_handle_t *hdl, vnode_t **vpp)
3661 {
3662 	struct sdev_node *dv = hdl->dh_data->sdev_dotdot;
3663 
3664 	ASSERT(dv);
3665 
3666 	rw_enter(&dv->sdev_contents, RW_READER);
3667 	*vpp = SDEVTOV(dv);
3668 	rw_exit(&dv->sdev_contents);
3669 }
3670 
3671 int
3672 devname_get_dir_path(devname_handle_t *hdl, char **path)
3673 {
3674 	struct sdev_node *dv = hdl->dh_data->sdev_dotdot;
3675 
3676 	ASSERT(dv);
3677 	rw_enter(&dv->sdev_contents, RW_READER);
3678 	*path = dv->sdev_path;
3679 	rw_exit(&dv->sdev_contents);
3680 	return (0);
3681 }
3682 
3683 int
3684 devname_get_dir_name(devname_handle_t *hdl, char **entry)
3685 {
3686 	struct sdev_node *dv = hdl->dh_data->sdev_dotdot;
3687 
3688 	ASSERT(dv);
3689 	rw_enter(&dv->sdev_contents, RW_READER);
3690 	*entry = dv->sdev_name;
3691 	rw_exit(&dv->sdev_contents);
3692 	return (0);
3693 }
3694 
3695 int
3696 devname_get_dir_nsmap(devname_handle_t *hdl, struct devname_nsmap **map)
3697 {
3698 	struct sdev_node *dv = hdl->dh_data->sdev_dotdot;
3699 
3700 	ASSERT(dv);
3701 	rw_enter(&dv->sdev_contents, RW_READER);
3702 	*map = dv->sdev_mapinfo;
3703 	rw_exit(&dv->sdev_contents);
3704 	return (0);
3705 }
3706 
3707 int
3708 devname_get_dir_handle(devname_handle_t *hdl, devname_handle_t **dir_hdl)
3709 {
3710 	struct sdev_node *dv = hdl->dh_data->sdev_dotdot;
3711 
3712 	ASSERT(dv);
3713 	rw_enter(&dv->sdev_contents, RW_READER);
3714 	*dir_hdl = &(dv->sdev_handle);
3715 	rw_exit(&dv->sdev_contents);
3716 	return (0);
3717 }
3718 
3719 void
3720 devname_set_nodetype(devname_handle_t *hdl, void *args, int spec)
3721 {
3722 	struct sdev_node *dv = hdl->dh_data;
3723 
3724 	ASSERT(dv);
3725 	rw_enter(&dv->sdev_contents, RW_WRITER);
3726 	hdl->dh_spec = (devname_spec_t)spec;
3727 	hdl->dh_args = (void *)i_ddi_strdup((char *)args, KM_SLEEP);
3728 	rw_exit(&dv->sdev_contents);
3729 }
3730 
3731 /*
3732  * a generic setattr() function
3733  *
3734  * note: flags only supports AT_UID and AT_GID.
3735  *	 Future enhancements can be done for other types, e.g. AT_MODE
3736  */
3737 int
3738 devname_setattr_func(struct vnode *vp, struct vattr *vap, int flags,
3739     struct cred *cred, int (*callback)(struct sdev_node *, struct vattr *,
3740     int), int protocol)
3741 {
3742 	struct sdev_node	*dv = VTOSDEV(vp);
3743 	struct sdev_node	*parent = dv->sdev_dotdot;
3744 	struct vattr		*get;
3745 	uint_t			mask = vap->va_mask;
3746 	int 			error;
3747 
3748 	/* some sanity checks */
3749 	if (vap->va_mask & AT_NOSET)
3750 		return (EINVAL);
3751 
3752 	if (vap->va_mask & AT_SIZE) {
3753 		if (vp->v_type == VDIR) {
3754 			return (EISDIR);
3755 		}
3756 	}
3757 
3758 	/* no need to set attribute, but do not fail either */
3759 	ASSERT(parent);
3760 	rw_enter(&parent->sdev_contents, RW_READER);
3761 	if (dv->sdev_state == SDEV_ZOMBIE) {
3762 		rw_exit(&parent->sdev_contents);
3763 		return (0);
3764 	}
3765 
3766 	/* If backing store exists, just set it. */
3767 	if (dv->sdev_attrvp) {
3768 		rw_exit(&parent->sdev_contents);
3769 		return (VOP_SETATTR(dv->sdev_attrvp, vap, flags, cred, NULL));
3770 	}
3771 
3772 	/*
3773 	 * Otherwise, for nodes with the persistence attribute, create it.
3774 	 */
3775 	ASSERT(dv->sdev_attr);
3776 	if (SDEV_IS_PERSIST(dv) ||
3777 	    ((vap->va_mask & ~AT_TIMES) != 0 && !SDEV_IS_DYNAMIC(dv))) {
3778 		sdev_vattr_merge(dv, vap);
3779 		rw_enter(&dv->sdev_contents, RW_WRITER);
3780 		error = sdev_shadow_node(dv, cred);
3781 		rw_exit(&dv->sdev_contents);
3782 		rw_exit(&parent->sdev_contents);
3783 
3784 		if (error)
3785 			return (error);
3786 		return (VOP_SETATTR(dv->sdev_attrvp, vap, flags, cred, NULL));
3787 	}
3788 
3789 
3790 	/*
3791 	 * sdev_attr was allocated in sdev_mknode
3792 	 */
3793 	rw_enter(&dv->sdev_contents, RW_WRITER);
3794 	error = secpolicy_vnode_setattr(cred, vp, vap,
3795 	    dv->sdev_attr, flags, sdev_unlocked_access, dv);
3796 	if (error) {
3797 		rw_exit(&dv->sdev_contents);
3798 		rw_exit(&parent->sdev_contents);
3799 		return (error);
3800 	}
3801 
3802 	get = dv->sdev_attr;
3803 	if (mask & AT_MODE) {
3804 		get->va_mode &= S_IFMT;
3805 		get->va_mode |= vap->va_mode & ~S_IFMT;
3806 	}
3807 
3808 	if ((mask & AT_UID) || (mask & AT_GID)) {
3809 		if (mask & AT_UID)
3810 			get->va_uid = vap->va_uid;
3811 		if (mask & AT_GID)
3812 			get->va_gid = vap->va_gid;
3813 		/*
3814 		 * a callback must be provided if the protocol is set
3815 		 */
3816 		if ((protocol & AT_UID) || (protocol & AT_GID)) {
3817 			ASSERT(callback);
3818 			error = callback(dv, get, protocol);
3819 			if (error) {
3820 				rw_exit(&dv->sdev_contents);
3821 				rw_exit(&parent->sdev_contents);
3822 				return (error);
3823 			}
3824 		}
3825 	}
3826 
3827 	if (mask & AT_ATIME)
3828 		get->va_atime = vap->va_atime;
3829 	if (mask & AT_MTIME)
3830 		get->va_mtime = vap->va_mtime;
3831 	if (mask & (AT_MODE | AT_UID | AT_GID | AT_CTIME)) {
3832 		gethrestime(&get->va_ctime);
3833 	}
3834 
3835 	sdev_vattr_merge(dv, get);
3836 	rw_exit(&dv->sdev_contents);
3837 	rw_exit(&parent->sdev_contents);
3838 	return (0);
3839 }
3840 
3841 /*
3842  * a generic inactive() function
3843  */
3844 void
3845 devname_inactive_func(struct vnode *vp, struct cred *cred,
3846     void (*callback)(struct vnode *))
3847 {
3848 	int clean;
3849 	struct sdev_node *dv = VTOSDEV(vp);
3850 	struct sdev_node *ddv = dv->sdev_dotdot;
3851 	int state;
3852 	struct devname_nsmap *map = NULL;
3853 	struct devname_ops *dirops = NULL;
3854 	void (*fn)(devname_handle_t *, struct cred *) = NULL;
3855 
3856 	rw_enter(&ddv->sdev_contents, RW_WRITER);
3857 	state = dv->sdev_state;
3858 
3859 	mutex_enter(&vp->v_lock);
3860 	ASSERT(vp->v_count >= 1);
3861 
3862 	if (vp->v_count == 1 && callback != NULL)
3863 		callback(vp);
3864 
3865 	clean = (vp->v_count == 1) && (state == SDEV_ZOMBIE);
3866 
3867 	/*
3868 	 * last ref count on the ZOMBIE node is released.
3869 	 * clean up the sdev_node, and
3870 	 * release the hold on the backing store node so that
3871 	 * the ZOMBIE backing stores also cleaned out.
3872 	 */
3873 	if (clean) {
3874 		ASSERT(ddv);
3875 		if (SDEV_IS_GLOBAL(dv)) {
3876 			map = ddv->sdev_mapinfo;
3877 			dirops = map ? map->dir_ops : NULL;
3878 			if (dirops && (fn = dirops->devnops_inactive))
3879 				(*fn)(&(dv->sdev_handle), cred);
3880 		}
3881 
3882 		ddv->sdev_nlink--;
3883 		if (vp->v_type == VDIR) {
3884 			dv->sdev_nlink--;
3885 		}
3886 		if ((dv->sdev_flags & SDEV_STALE) == 0)
3887 			avl_remove(&ddv->sdev_entries, dv);
3888 		dv->sdev_nlink--;
3889 		--vp->v_count;
3890 		mutex_exit(&vp->v_lock);
3891 		sdev_nodedestroy(dv, 0);
3892 	} else {
3893 		--vp->v_count;
3894 		mutex_exit(&vp->v_lock);
3895 	}
3896 	rw_exit(&ddv->sdev_contents);
3897 }
3898