/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */


#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/fs/fifonode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/autoconf.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>

struct vfs spec_vfs;
static dev_t specdev;
struct kmem_cache *snode_cache;
int spec_debug = 0;

static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);

struct vnode *
specvp_devfs(
	struct vnode *realvp,
	dev_t dev,
	vtype_t vtyp,
	struct cred *cr,
	dev_info_t *dip)
{
	struct vnode *vp;

	ASSERT(realvp && dip);
	vp = specvp(realvp, dev, vtyp, cr);
	ASSERT(vp);

	/* associate a dip hold with the common snode's s_dip pointer */
	spec_assoc_vp_with_devi(vp, dip);
	return (vp);
}

/*
 * Return a shadow special vnode for the given dev.
 * If no snode exists for this dev, create one and put it
 * in a table hashed by <dev, realvp>.  If the snode for
 * this dev is already in the table, return it (the ref count is
 * incremented by sfind).  The snode will be flushed from the
 * table when spec_inactive calls sdelete.
 *
 * The fsid is inherited from the real vnode so that clones
 * can be found.
 *
 */
struct vnode *
specvp(
	struct vnode *vp,
	dev_t dev,
	vtype_t type,
	struct cred *cr)
{
	struct snode *sp;
	struct snode *nsp;
	struct snode *csp;
	struct vnode *svp;
	struct vattr va;
	int rc;
	int used_csp = 0;		/* Did we use pre-allocated csp */

	if (vp == NULL)
		return (NULL);
	if (vp->v_type == VFIFO)
		return (fifovp(vp, cr));

	ASSERT(vp->v_type == type);
	ASSERT(vp->v_rdev == dev);

	/*
	 * Pre-allocate snodes before holding any locks in case we block
	 */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	csp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	/*
	 * Get the time attributes outside of the stable lock since
	 * this operation may block. Unfortunately, it may not have
	 * been required if the snode is in the cache.
	 */
	va.va_mask = AT_FSID | AT_TIMES;
	rc = VOP_GETATTR(vp, &va, 0, cr, NULL);	/* XXX may block! */

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, vp)) == NULL) {
		struct vnode *cvp;

		sp = nsp;	/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp = vp;
		VN_HOLD(vp);
		sp->s_commonvp = NULL;
		sp->s_dev = dev;
		sp->s_dip = NULL;
		sp->s_nextr = NULL;
		sp->s_list = NULL;
		sp->s_plcy = NULL;
		sp->s_size = 0;
		sp->s_flag = 0;
		if (rc == 0) {
			/*
			 * Set times in snode to those in the vnode.
			 */
			sp->s_fsid = va.va_fsid;
			sp->s_atime = va.va_atime.tv_sec;
			sp->s_mtime = va.va_mtime.tv_sec;
			sp->s_ctime = va.va_ctime.tv_sec;
		} else {
			sp->s_fsid = specdev;
			sp->s_atime = 0;
			sp->s_mtime = 0;
			sp->s_ctime = 0;
		}
		sp->s_count = 0;
		sp->s_mapcnt = 0;

		vn_reinit(svp);
		svp->v_flag = (vp->v_flag & VROOT);
		svp->v_vfsp = vp->v_vfsp;
		VFS_HOLD(svp->v_vfsp);
		svp->v_type = type;
		svp->v_rdev = dev;
		(void) vn_copypath(vp, svp);
		if (type == VBLK || type == VCHR) {
			cvp = get_cvp(dev, type, csp, &used_csp);
			svp->v_stream = cvp->v_stream;

			sp->s_commonvp = cvp;
		}
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
		if (used_csp == 0) {
			/* Didn't use pre-allocated snode so free it */
			kmem_cache_free(snode_cache, csp);
		}
	} else {
		mutex_exit(&stable_lock);
		/* free unused snode memory */
		kmem_cache_free(snode_cache, nsp);
		kmem_cache_free(snode_cache, csp);
	}
	return (STOV(sp));
}
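
/*
 * Usage sketch (illustrative only, not part of specfs itself): a client
 * file system that resolves a name to a device node typically re-wraps
 * the "real" vnode with specvp() and hands its caller the shadow vnode
 * instead, along the lines of:
 *
 *	newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
 *	VN_RELE(*vpp);
 *	if (newvp == NULL)
 *		error = ENOSYS;
 *	else
 *		*vpp = newvp;
 *
 * The shadow returned by specvp() is already held, so the caller drops
 * its hold on the real vnode once the shadow has been substituted.
 */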

/*
 * Return a special vnode for the given dev; no vnode is supplied
 * for it to shadow.  Always create a new snode and put it in the
 * table hashed by <dev, NULL>.  The snode will be flushed from the
 * table when spec_inactive() calls sdelete().  The association of
 * this node with an attached instance of hardware is not made until
 * spec_open time.
 *
 * N.B. Assumes caller takes on responsibility of making sure no one
 * else is creating a snode for (dev, type) at this time.
 */
struct vnode *
makespecvp(dev_t dev, vtype_t type)
{
	struct snode *sp;
	struct vnode *svp, *cvp;
	time_t now;

	sp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	svp = STOV(sp);
	cvp = commonvp(dev, type);
	now = gethrestime_sec();

	sp->s_realvp = NULL;
	sp->s_commonvp = cvp;
	sp->s_dev = dev;
	sp->s_dip = NULL;
	sp->s_nextr = NULL;
	sp->s_list = NULL;
	sp->s_plcy = NULL;
	sp->s_size = 0;
	sp->s_flag = 0;
	sp->s_fsid = specdev;
	sp->s_atime = now;
	sp->s_mtime = now;
	sp->s_ctime = now;
	sp->s_count = 0;
	sp->s_mapcnt = 0;

	vn_reinit(svp);
	svp->v_vfsp = &spec_vfs;
	svp->v_stream = cvp->v_stream;
	svp->v_type = type;
	svp->v_rdev = dev;

	vn_exists(svp);
	mutex_enter(&stable_lock);
	sinsert(sp);
	mutex_exit(&stable_lock);

	return (svp);
}
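
/*
 * Usage sketch: makectty() later in this file is a representative caller.
 * Given a controlling-terminal vnode it builds a brand new snode for the
 * same character device:
 *
 *	vp = makespecvp(ovp->v_rdev, VCHR);
 *
 * As noted above, the caller must guarantee that no other thread is
 * creating an snode for the same (dev, type) at the same time.
 */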


/*
 * This function is called from spec_assoc_vp_with_devi(). That function
 * associates a "new" dip with a common snode, releasing (any) old dip
 * in the process. This function (spec_assoc_fence()) looks at the "new dip"
 * and determines whether the snode should be fenced off or not. As the table
 * below indicates, the value of old-dip is a don't care for all cases.
 *
 * old-dip	new-dip		common-snode
 * =========================================
 * Don't care	NULL		unfence
 * Don't care	retired		fence
 * Don't care	not-retired	unfence
 *
 * Since old-dip value is a "don't care", it is not passed into this function.
 */
static void
spec_assoc_fence(dev_info_t *ndip, vnode_t *vp)
{
	int fence;
	struct snode *csp;

	ASSERT(vp);
	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	fence = 0;
	if (ndip != NULL) {
		mutex_enter(&DEVI(ndip)->devi_lock);
		if (DEVI(ndip)->devi_flags & DEVI_RETIRED)
			fence = 1;
		mutex_exit(&DEVI(ndip)->devi_lock);
	}

	csp = VTOCS(vp);
	ASSERT(csp);

	/* SFENCED flag only set on common snode */
	mutex_enter(&csp->s_lock);
	if (fence)
		csp->s_flag |= SFENCED;
	else
		csp->s_flag &= ~SFENCED;
	mutex_exit(&csp->s_lock);

	FENDBG((CE_NOTE, "%sfenced common snode (%p) for new dip=%p",
	    fence ? "" : "un", (void *)csp, (void *)ndip));
}
31125e8c5aaSvikram
3127c478bd9Sstevel@tonic-gate /*
3137c478bd9Sstevel@tonic-gate * Associate the common snode with a devinfo node. This is called from:
3147c478bd9Sstevel@tonic-gate *
3157c478bd9Sstevel@tonic-gate * 1) specvp_devfs to associate a specfs node with the dip attached
3167c478bd9Sstevel@tonic-gate * by devfs.
3177c478bd9Sstevel@tonic-gate *
3187c478bd9Sstevel@tonic-gate * 2) spec_open after path reconstruction and attach.
3197c478bd9Sstevel@tonic-gate *
3207c478bd9Sstevel@tonic-gate * 3) From dacf processing to associate a makespecvp node with
3217c478bd9Sstevel@tonic-gate * the dip that dacf postattach processing is being performed on.
3227c478bd9Sstevel@tonic-gate * This association is made prior to open to avoid recursion issues.
3237c478bd9Sstevel@tonic-gate *
3247c478bd9Sstevel@tonic-gate * 4) From ddi_assoc_queue_with_devi to change vnode association as part of
3257c478bd9Sstevel@tonic-gate * DL_ATTACH/DL_DETACH processing (SDIPSET already set). The call
3267c478bd9Sstevel@tonic-gate * from ddi_assoc_queue_with_devi may specify a NULL dip.
3277c478bd9Sstevel@tonic-gate *
3287c478bd9Sstevel@tonic-gate * We put an extra hold on the devinfo node passed in as we establish it as
3297c478bd9Sstevel@tonic-gate * the new s_dip pointer. Any hold associated with the prior s_dip pointer
3307c478bd9Sstevel@tonic-gate * is released. The new hold will stay active until another call to
3317c478bd9Sstevel@tonic-gate * spec_assoc_vp_with_devi or until the common snode is destroyed by
3327c478bd9Sstevel@tonic-gate * spec_inactive after the last VN_RELE of the common node. This devinfo hold
3337c478bd9Sstevel@tonic-gate * transfers across a clone open except in the clone_dev case, where the clone
3347c478bd9Sstevel@tonic-gate * driver is no longer required after open.
3357c478bd9Sstevel@tonic-gate *
3367c478bd9Sstevel@tonic-gate * When SDIPSET is set and s_dip is NULL, the vnode has an association with
3377c478bd9Sstevel@tonic-gate * the driver even though there is currently no association with a specific
3387c478bd9Sstevel@tonic-gate * hardware instance.
3397c478bd9Sstevel@tonic-gate */
void
spec_assoc_vp_with_devi(struct vnode *vp, dev_info_t *dip)
{
	struct snode *csp;
	dev_info_t *olddip;

	ASSERT(vp);

	/*
	 * Don't establish a NULL association for a vnode associated with the
	 * clone driver.  The qassociate(, -1) call from a streams driver's
	 * open implementation to indicate support for qassociate has the
	 * side-effect of this type of spec_assoc_vp_with_devi call.  This
	 * call should not change the association of the pre-clone
	 * vnode associated with the clone driver; the post-clone newdev
	 * association will be established later by spec_clone().
	 */
	if ((dip == NULL) && (getmajor(vp->v_rdev) == clone_major))
		return;

	/* hold the new */
	if (dip)
		e_ddi_hold_devi(dip);

	csp = VTOS(VTOS(vp)->s_commonvp);
	mutex_enter(&csp->s_lock);
	olddip = csp->s_dip;
	csp->s_dip = dip;
	csp->s_flag |= SDIPSET;

	/* If association changes then invalidate cached size */
	if (olddip != dip)
		csp->s_flag &= ~SSIZEVALID;
	mutex_exit(&csp->s_lock);

	spec_assoc_fence(dip, vp);

	/* release the old */
	if (olddip)
		ddi_release_devi(olddip);
}

/*
 * Return the held dip associated with the specified snode.
 */
dev_info_t *
spec_hold_devi_by_vp(struct vnode *vp)
{
	struct snode *csp;
	dev_info_t *dip;

	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	csp = VTOS(VTOS(vp)->s_commonvp);
	dip = csp->s_dip;
	if (dip)
		e_ddi_hold_devi(dip);
	return (dip);
}

/*
 * Find a special vnode that refers to the given device
 * of the given type.  Never return a "common" vnode.
 * Return NULL if a special vnode does not exist.
 * HOLD the vnode before returning it.
 */
struct vnode *
specfind(dev_t dev, vtype_t type)
{
	struct snode *st;
	struct vnode *nvp;

	mutex_enter(&stable_lock);
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		if (st->s_dev == dev) {
			nvp = STOV(st);
			if (nvp->v_type == type && st->s_commonvp != nvp) {
				VN_HOLD(nvp);
				mutex_exit(&stable_lock);
				return (nvp);
			}
		}
		st = st->s_next;
	}
	mutex_exit(&stable_lock);
	return (NULL);
}

/*
 * Loop through the snode cache looking for snodes referencing dip.
 *
 * This function determines if a devinfo node is "BUSY" from the perspective
 * of having an active vnode associated with the device, which represents a
 * dependency on the device's services.  This function is needed because a
 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
 * for instance, the framework is manipulating the node (has an open
 * ndi_hold_devi).
 *
 * Returns:
 *	DEVI_REFERENCED		- if dip is referenced
 *	DEVI_NOT_REFERENCED	- if dip is not referenced
 */
int
devi_stillreferenced(dev_info_t *dip)
{
	struct snode *sp;
	int i;

	/* if no hold then there can't be an snode with s_dip == dip */
	if (e_ddi_devi_holdcnt(dip) == 0)
		return (DEVI_NOT_REFERENCED);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			if (sp->s_dip == dip) {
				mutex_exit(&stable_lock);
				return (DEVI_REFERENCED);
			}
		}
	}
	mutex_exit(&stable_lock);
	return (DEVI_NOT_REFERENCED);
}

/*
 * Given an snode, returns the open count and the dip
 * associated with that snode.
 * Assumes the caller holds the appropriate locks
 * to prevent snode and/or dip from going away.
 * Returns:
 *	-1	No associated dip
 *	>= 0	Number of opens.
 */
int
spec_devi_open_count(struct snode *sp, dev_info_t **dipp)
{
	dev_info_t *dip;
	uint_t count;
	struct vnode *vp;

	ASSERT(sp);
	ASSERT(dipp);

	vp = STOV(sp);

	*dipp = NULL;

	/*
	 * We are only interested in common snodes.  Only common snodes
	 * get their s_count fields bumped up on opens.
	 */
	if (sp->s_commonvp != vp || (dip = sp->s_dip) == NULL)
		return (-1);

	mutex_enter(&sp->s_lock);
	count = sp->s_count + sp->s_mapcnt;
	if (sp->s_flag & SLOCKED)
		count++;
	mutex_exit(&sp->s_lock);

	*dipp = dip;

	return (count);
}
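
/*
 * Interpretation sketch (hypothetical caller, not part of specfs): with
 * the locks described above held, a caller deciding whether a device
 * instance is in active use might do:
 *
 *	dev_info_t *dip;
 *	int opens = spec_devi_open_count(csp, &dip);
 *
 *	if (opens > 0)
 *		... the device has outstanding opens or mappings on dip ...
 *	else if (opens == -1)
 *		... no dip is associated with this snode ...
 */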

/*
 * Given a device vnode, return the common
 * vnode associated with it.
 */
struct vnode *
common_specvp(struct vnode *vp)
{
	struct snode *sp;

	if ((vp->v_type != VBLK) && (vp->v_type != VCHR) ||
	    !vn_matchops(vp, spec_getvnodeops()))
		return (vp);
	sp = VTOS(vp);
	return (sp->s_commonvp);
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.
 * Similar to commonvp() but doesn't acquire the stable_lock, and
 * may use a pre-allocated snode provided by caller.
 */
static struct vnode *
get_cvp(
	dev_t dev,
	vtype_t type,
	struct snode *nsp,	/* pre-allocated snode */
	int *used_nsp)		/* flag indicating if we use nsp */
{
	struct snode *sp;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		*used_nsp = 1;		/* return value */
		svp = STOV(sp);

		sp->s_realvp = NULL;
		sp->s_commonvp = svp;		/* points to itself */
		sp->s_dev = dev;
		sp->s_dip = NULL;
		sp->s_nextr = NULL;
		sp->s_list = NULL;
		sp->s_plcy = NULL;
		sp->s_size = UNKNOWN_SIZE;
		sp->s_flag = 0;
		sp->s_fsid = specdev;
		sp->s_atime = 0;
		sp->s_mtime = 0;
		sp->s_ctime = 0;
		sp->s_count = 0;
		sp->s_mapcnt = 0;

		vn_reinit(svp);
		svp->v_vfsp = &spec_vfs;
		svp->v_type = type;
		svp->v_rdev = dev;
		vn_exists(svp);
		sinsert(sp);
	} else
		*used_nsp = 0;
	return (STOV(sp));
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.  For use ONLY by SPECFS.
 */
struct vnode *
commonvp(dev_t dev, vtype_t type)
{
	struct snode *sp, *nsp;
	struct vnode *svp;

	/* Pre-allocate snode in case we might block */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-alloced snode */
		svp = STOV(sp);

		sp->s_realvp = NULL;
		sp->s_commonvp = svp;		/* points to itself */
		sp->s_dev = dev;
		sp->s_dip = NULL;
		sp->s_nextr = NULL;
		sp->s_list = NULL;
		sp->s_plcy = NULL;
		sp->s_size = UNKNOWN_SIZE;
		sp->s_flag = 0;
		sp->s_fsid = specdev;
		sp->s_atime = 0;
		sp->s_mtime = 0;
		sp->s_ctime = 0;
		sp->s_count = 0;
		sp->s_mapcnt = 0;

		vn_reinit(svp);
		svp->v_vfsp = &spec_vfs;
		svp->v_type = type;
		svp->v_rdev = dev;
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
	} else {
		mutex_exit(&stable_lock);
		/* Didn't need the pre-allocated snode */
		kmem_cache_free(snode_cache, nsp);
	}
	return (STOV(sp));
}

/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 */
struct snode *stable[STABLESIZE];
int stablesz = STABLESIZE;
kmutex_t stable_lock;

/*
 * Put a snode in the table.
 */
static void
sinsert(struct snode *sp)
{
	ASSERT(MUTEX_HELD(&stable_lock));
	sp->s_next = stable[STABLEHASH(sp->s_dev)];
	stable[STABLEHASH(sp->s_dev)] = sp;
}

/*
 * Remove an snode from the hash table.
 * The realvp is not released here because spec_inactive() still
 * needs it to do a spec_fsync().
 */
void
sdelete(struct snode *sp)
{
	struct snode *st;
	struct snode *stprev = NULL;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(sp->s_dev)];
	while (st != NULL) {
		if (st == sp) {
			if (stprev == NULL)
				stable[STABLEHASH(sp->s_dev)] = st->s_next;
			else
				stprev->s_next = st->s_next;
			break;
		}
		stprev = st;
		st = st->s_next;
	}
}

/*
 * Lookup an snode by <dev, type, vp>.
 * ONLY looks for snodes with non-NULL s_realvp members and
 * common snodes (with s_commonvp pointing to its vnode).
 *
 * If vp is NULL, only return commonvp.  Otherwise return
 * shadow vp with both shadow and common vp's VN_HELD.
 */
static struct snode *
sfind(
	dev_t dev,
	vtype_t type,
	struct vnode *vp)
{
	struct snode *st;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		svp = STOV(st);
		if (st->s_dev == dev && svp->v_type == type &&
		    VN_CMP(st->s_realvp, vp) &&
		    (vp != NULL || st->s_commonvp == svp) &&
		    (vp == NULL || st->s_realvp->v_vfsp == vp->v_vfsp)) {
			VN_HOLD(svp);
			return (st);
		}
		st = st->s_next;
	}
	return (NULL);
}

/*
 * Mark the accessed, updated, or changed times in an snode
 * with the current time.
 */
void
smark(struct snode *sp, int flag)
{
	time_t now = gethrestime_sec();

	/* check for change to avoid unnecessary locking */
	ASSERT((flag & ~(SACC|SUPD|SCHG)) == 0);
	if (((flag & sp->s_flag) != flag) ||
	    ((flag & SACC) && (sp->s_atime != now)) ||
	    ((flag & SUPD) && (sp->s_mtime != now)) ||
	    ((flag & SCHG) && (sp->s_ctime != now))) {
		/* lock and update */
		mutex_enter(&sp->s_lock);
		sp->s_flag |= flag;
		if (flag & SACC)
			sp->s_atime = now;
		if (flag & SUPD)
			sp->s_mtime = now;
		if (flag & SCHG)
			sp->s_ctime = now;
		mutex_exit(&sp->s_lock);
	}
}
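
/*
 * Usage sketch (illustrative; the actual callers live in the specfs vnode
 * operations): a read path marks the access time and a write path marks
 * the modification and change times after a successful transfer, e.g.
 *
 *	smark(sp, SACC);		after a read
 *	smark(sp, SUPD|SCHG);		after a write
 */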

/*
 * Return the maximum file offset permitted for this device.
 * -1 means unrestricted.  SLOFFSET is associated with D_64BIT.
 *
 * On a 32-bit kernel this will limit:
 *   o	D_64BIT devices to SPEC_MAXOFFSET_T.
 *   o	non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
 */
offset_t
spec_maxoffset(struct vnode *vp)
{
	struct snode *sp = VTOS(vp);
	struct snode *csp = VTOS(sp->s_commonvp);

	if (vp->v_stream)
		return ((offset_t)-1);
	else if (csp->s_flag & SANYOFFSET)	/* D_U64BIT */
		return ((offset_t)-1);
#ifdef _ILP32
	if (csp->s_flag & SLOFFSET)		/* D_64BIT */
		return (SPEC_MAXOFFSET_T);
#endif	/* _ILP32 */
	return (MAXOFF_T);
}

/*ARGSUSED*/
static int
snode_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct snode *sp = buf;
	struct vnode *vp;

	vp = sp->s_vnode = vn_alloc(kmflags);
	if (vp == NULL) {
		return (-1);
	}
	vn_setops(vp, spec_getvnodeops());
	vp->v_data = sp;

	mutex_init(&sp->s_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sp->s_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
snode_destructor(void *buf, void *cdrarg)
{
	struct snode *sp = buf;
	struct vnode *vp = STOV(sp);

	mutex_destroy(&sp->s_lock);
	cv_destroy(&sp->s_cv);

	vn_free(vp);
}


int
specinit(int fstype, char *name)
{
	static const fs_operation_def_t spec_vfsops_template[] = {
		VFSNAME_SYNC, { .vfs_sync = spec_sync },
		NULL, NULL
	};
	extern struct vnodeops *spec_vnodeops;
	extern const fs_operation_def_t spec_vnodeops_template[];
	struct vfsops *spec_vfsops;
	int error;
	dev_t dev;

	/*
	 * Associate vfs and vnode operations.
	 */
	error = vfs_setfsops(fstype, spec_vfsops_template, &spec_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "specinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, spec_vnodeops_template, &spec_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "specinit: bad vnode ops template");
		return (error);
	}

	mutex_init(&stable_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spec_syncbusy, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Create snode cache
	 */
	snode_cache = kmem_cache_create("snode_cache", sizeof (struct snode),
	    0, snode_constructor, snode_destructor, NULL, NULL, NULL, 0);

	/*
	 * Associate vfs operations with spec_vfs
	 */
	VFS_INIT(&spec_vfs, spec_vfsops, (caddr_t)NULL);
	if ((dev = getudev()) == -1)
		dev = 0;
	specdev = makedevice(dev, 0);
	return (0);
}

int
device_close(struct vnode *vp, int flag, struct cred *cr)
{
	struct snode *sp = VTOS(vp);
	enum vtype type = vp->v_type;
	struct vnode *cvp;
	dev_t dev;
	int error;

	dev = sp->s_dev;
	cvp = sp->s_commonvp;

	switch (type) {

	case VCHR:
		if (vp->v_stream) {
			if (cvp->v_stream != NULL)
				error = strclose(cvp, flag, cr);
			vp->v_stream = NULL;
		} else
			error = dev_close(dev, flag, OTYP_CHR, cr);
		break;

	case VBLK:
		/*
		 * On the last close of a block device we must
		 * invalidate any in-core blocks so that we
		 * can, for example, change floppy disks.
		 */
		(void) spec_putpage(cvp, (offset_t)0,
		    (size_t)0, B_INVAL|B_FORCE, cr, NULL);
		bflush(dev);
		binval(dev);
		error = dev_close(dev, flag, OTYP_BLK, cr);
		break;
	default:
		panic("device_close: not a device");
		/*NOTREACHED*/
	}

	return (error);
}

struct vnode *
makectty(vnode_t *ovp)
{
	vnode_t *vp;

	if (vp = makespecvp(ovp->v_rdev, VCHR)) {
		struct snode *sp;
		struct snode *csp;
		struct vnode *cvp;

		sp = VTOS(vp);
		cvp = sp->s_commonvp;
		csp = VTOS(cvp);
		mutex_enter(&csp->s_lock);
		csp->s_count++;
		mutex_exit(&csp->s_lock);
	}

	return (vp);
}

void
spec_snode_walk(int (*callback)(struct snode *sp, void *arg), void *arg)
{
	struct snode *sp;
	int i;

	ASSERT(callback);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp; sp = sp->s_next) {
			if (callback(sp, arg) != DDI_WALK_CONTINUE)
				goto out;
		}
	}
out:
	mutex_exit(&stable_lock);
}
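
/*
 * Callback sketch (hypothetical, for illustration only): the walker invokes
 * the callback for every snode with stable_lock held, so the callback must
 * not block or re-enter the snode table.  Returning DDI_WALK_CONTINUE keeps
 * the walk going; any other return value stops it.
 *
 *	static int
 *	count_snodes_for_dip(struct snode *sp, void *arg)
 *	{
 *		if (sp->s_dip == (dev_info_t *)arg)
 *			... count or record the match ...
 *		return (DDI_WALK_CONTINUE);
 *	}
 *
 *	spec_snode_walk(count_snodes_for_dip, (void *)dip);
 */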

int
spec_is_clone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SCLONE) ? 1 : 0);
	}

	return (0);
}

int
spec_is_selfclone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SSELFCLONE) ? 1 : 0);
	}

	return (0);
}

/*
 * We may be invoked with a NULL vp, in which case we fence off
 * all snodes associated with dip.
 */
int
spec_fence_snode(dev_info_t *dip, struct vnode *vp)
{
	struct snode *sp;
	struct snode *csp;
	int retired;
	int i;
	char *path;
	int emitted;

	ASSERT(dip);

	retired = 0;
	mutex_enter(&DEVI(dip)->devi_lock);
	if (DEVI(dip)->devi_flags & DEVI_RETIRED)
		retired = 1;
	mutex_exit(&DEVI(dip)->devi_lock);

	if (!retired)
		return (0);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);


	if (vp != NULL) {
		ASSERT(vn_matchops(vp, spec_getvnodeops()));
		csp = VTOCS(vp);
		ASSERT(csp);
		mutex_enter(&csp->s_lock);
		csp->s_flag |= SFENCED;
		mutex_exit(&csp->s_lock);
		FENDBG((CE_NOTE, "fenced off snode(%p) for dip: %s",
		    (void *)csp, path));
		kmem_free(path, MAXPATHLEN);
		return (0);
	}

	emitted = 0;
	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			ASSERT(sp->s_commonvp);
			csp = VTOS(sp->s_commonvp);
			if (csp->s_dip == dip) {
				/* fence off the common snode */
				mutex_enter(&csp->s_lock);
				csp->s_flag |= SFENCED;
				mutex_exit(&csp->s_lock);
				if (!emitted) {
					FENDBG((CE_NOTE, "fenced 1 of N"));
					emitted++;
				}
			}
		}
	}
	mutex_exit(&stable_lock);

	FENDBG((CE_NOTE, "fenced off all snodes for dip: %s", path));
	kmem_free(path, MAXPATHLEN);

	return (0);
}


int
spec_unfence_snode(dev_info_t *dip)
{
	struct snode *sp;
	struct snode *csp;
	int i;
	char *path;
	int emitted;

	ASSERT(dip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	emitted = 0;
	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			ASSERT(sp->s_commonvp);
			csp = VTOS(sp->s_commonvp);
			ASSERT(csp);
			if (csp->s_dip == dip) {
				/* unfence the common snode */
				mutex_enter(&csp->s_lock);
				csp->s_flag &= ~SFENCED;
				mutex_exit(&csp->s_lock);
				if (!emitted) {
					FENDBG((CE_NOTE, "unfenced 1 of N"));
					emitted++;
				}
			}
		}
	}
	mutex_exit(&stable_lock);

	FENDBG((CE_NOTE, "unfenced all snodes for dip: %s", path));
	kmem_free(path, MAXPATHLEN);

	return (0);
}

void
spec_size_invalidate(dev_t dev, vtype_t type)
{

	struct snode *csp;

	mutex_enter(&stable_lock);
	if ((csp = sfind(dev, type, NULL)) != NULL) {
		mutex_enter(&csp->s_lock);
		csp->s_flag &= ~SSIZEVALID;
		VN_RELE_ASYNC(STOV(csp), system_taskq);
		mutex_exit(&csp->s_lock);
	}
	mutex_exit(&stable_lock);
}