/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/fs/fifonode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/autoconf.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>

struct vfs spec_vfs;
static dev_t specdev;
struct kmem_cache *snode_cache;
int spec_debug = 0;

static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);

struct vnode *
specvp_devfs(
	struct vnode	*realvp,
	dev_t		dev,
	vtype_t		vtyp,
	struct cred	*cr,
	dev_info_t	*dip)
{
	struct vnode	*vp;

	ASSERT(realvp && dip);
	vp = specvp(realvp, dev, vtyp, cr);
	ASSERT(vp);

	/* associate a dip hold with the common snode's s_dip pointer */
	spec_assoc_vp_with_devi(vp, dip);
	return (vp);
}

/*
 * Return a shadow special vnode for the given dev.
 * If no snode exists for this dev create one and put it
 * in a table hashed by <dev, realvp>.  If the snode for
 * this dev is already in the table return it (ref count is
 * incremented by sfind).  The snode will be flushed from the
 * table when spec_inactive calls sdelete.
 *
 * The fsid is inherited from the real vnode so that clones
 * can be found.
 */
struct vnode *
specvp(
	struct vnode	*vp,
	dev_t		dev,
	vtype_t		type,
	struct cred	*cr)
{
	struct snode *sp;
	struct snode *nsp;
	struct snode *csp;
	struct vnode *svp;
	struct vattr va;
	int	rc;
	int	used_csp = 0;		/* Did we use pre-allocated csp */

	if (vp == NULL)
		return (NULL);
	if (vp->v_type == VFIFO)
		return (fifovp(vp, cr));

	ASSERT(vp->v_type == type);
	ASSERT(vp->v_rdev == dev);

	/*
	 * Pre-allocate snodes before holding any locks in case we block
	 */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	csp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	/*
	 * Get the time attributes outside of the stable lock since
	 * this operation may block. Unfortunately, it may not have
	 * been required if the snode is in the cache.
	 */
	va.va_mask = AT_FSID | AT_TIMES;
	rc = VOP_GETATTR(vp, &va, 0, cr, NULL);	/* XXX may block! */

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, vp)) == NULL) {
		struct vnode *cvp;

		sp = nsp;	/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp	= vp;
		VN_HOLD(vp);
		sp->s_commonvp	= NULL;
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= 0;
		sp->s_flag	= 0;
		if (rc == 0) {
			/*
			 * Set times in snode to those in the vnode.
			 */
			sp->s_fsid = va.va_fsid;
			sp->s_atime = va.va_atime.tv_sec;
			sp->s_mtime = va.va_mtime.tv_sec;
			sp->s_ctime = va.va_ctime.tv_sec;
		} else {
			sp->s_fsid = specdev;
			sp->s_atime = 0;
			sp->s_mtime = 0;
			sp->s_ctime = 0;
		}
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_flag	= (vp->v_flag & VROOT);
		svp->v_vfsp	= vp->v_vfsp;
		VFS_HOLD(svp->v_vfsp);
		svp->v_type	= type;
		svp->v_rdev	= dev;
		(void) vn_copypath(vp, svp);
		if (type == VBLK || type == VCHR) {
			cvp = get_cvp(dev, type, csp, &used_csp);
			svp->v_stream = cvp->v_stream;

			sp->s_commonvp = cvp;
		}
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
		if (used_csp == 0) {
			/* Didn't use pre-allocated snode so free it */
			kmem_cache_free(snode_cache, csp);
		}
	} else {
		mutex_exit(&stable_lock);
		/* free unused snode memory */
		kmem_cache_free(snode_cache, nsp);
		kmem_cache_free(snode_cache, csp);
	}
	return (STOV(sp));
}
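
/*
 * Illustrative sketch (not part of the original file): a file system that
 * resolves a lookup to a device special file would typically exchange its
 * own vnode for a specfs shadow vnode along these lines.  The helper name
 * and the surrounding error handling are hypothetical.
 */
#if 0
static int
example_lookup_device(struct vnode *realvp, struct cred *cr,
    struct vnode **vpp)
{
	struct vnode *svp;

	/* specvp() takes its own hold on realvp via VN_HOLD */
	svp = specvp(realvp, realvp->v_rdev, realvp->v_type, cr);
	if (svp == NULL)
		return (ENOSYS);
	VN_RELE(realvp);	/* drop the lookup hold; the shadow vp is held */
	*vpp = svp;
	return (0);
}
#endif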

/*
 * Return a special vnode for the given dev; no vnode is supplied
 * for it to shadow.  Always create a new snode and put it in the
 * table hashed by <dev, NULL>.  The snode will be flushed from the
 * table when spec_inactive() calls sdelete().  The association of
 * this node with an attached instance of hardware is not made until
 * spec_open time.
 *
 * N.B. Assumes caller takes on responsibility of making sure no one
 * else is creating a snode for (dev, type) at this time.
 */
struct vnode *
makespecvp(dev_t dev, vtype_t type)
{
	struct snode *sp;
	struct vnode *svp, *cvp;
	time_t now;

	sp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	svp = STOV(sp);
	cvp = commonvp(dev, type);
	now = gethrestime_sec();

	sp->s_realvp	= NULL;
	sp->s_commonvp	= cvp;
	sp->s_dev	= dev;
	sp->s_dip	= NULL;
	sp->s_nextr	= NULL;
	sp->s_list	= NULL;
	sp->s_plcy	= NULL;
	sp->s_size	= 0;
	sp->s_flag	= 0;
	sp->s_fsid	= specdev;
	sp->s_atime	= now;
	sp->s_mtime	= now;
	sp->s_ctime	= now;
	sp->s_count	= 0;
	sp->s_mapcnt	= 0;

	vn_reinit(svp);
	svp->v_vfsp	= &spec_vfs;
	svp->v_stream	= cvp->v_stream;
	svp->v_type	= type;
	svp->v_rdev	= dev;

	vn_exists(svp);
	mutex_enter(&stable_lock);
	sinsert(sp);
	mutex_exit(&stable_lock);

	return (svp);
}
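
/*
 * Illustrative sketch (not part of the original file): makespecvp() serves
 * kernel-internal consumers that know only a device number and have no
 * backing file system vnode.  The function below is hypothetical.
 */
#if 0
static struct vnode *
example_vnode_for_device(major_t maj, minor_t min)
{
	/*
	 * Per the N.B. above, the caller must guarantee no concurrent
	 * snode creation for this (dev, type) pair.
	 */
	return (makespecvp(makedevice(maj, min), VCHR));
}
#endif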


/*
 * This function is called from spec_assoc_vp_with_devi(). That function
 * associates a "new" dip with a common snode, releasing (any) old dip
 * in the process. This function (spec_assoc_fence()) looks at the "new dip"
 * and determines whether the snode should be fenced off or not. As the table
 * below indicates, the value of old-dip is a don't care for all cases.
 *
 * old-dip	new-dip		common-snode
 * =========================================
 * Don't care	NULL		unfence
 * Don't care	retired		fence
 * Don't care	not-retired	unfence
 *
 * Since the old-dip value is a "don't care", it is not passed into this
 * function.
 */
static void
spec_assoc_fence(dev_info_t *ndip, vnode_t *vp)
{
	int		fence;
	struct snode	*csp;

	ASSERT(vp);
	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	fence = 0;
	if (ndip != NULL) {
		mutex_enter(&DEVI(ndip)->devi_lock);
		if (DEVI(ndip)->devi_flags & DEVI_RETIRED)
			fence = 1;
		mutex_exit(&DEVI(ndip)->devi_lock);
	}

	csp = VTOCS(vp);
	ASSERT(csp);

	/* SFENCED flag only set on common snode */
	mutex_enter(&csp->s_lock);
	if (fence)
		csp->s_flag |= SFENCED;
	else
		csp->s_flag &= ~SFENCED;
	mutex_exit(&csp->s_lock);

	FENDBG((CE_NOTE, "%sfenced common snode (%p) for new dip=%p",
	    fence ? "" : "un", (void *)csp, (void *)ndip));
}

/*
 * Associate the common snode with a devinfo node.  This is called from:
 *
 *   1) specvp_devfs to associate a specfs node with the dip attached
 *	by devfs.
 *
 *   2) spec_open after path reconstruction and attach.
 *
 *   3) From dacf processing to associate a makespecvp node with
 *	the dip that dacf postattach processing is being performed on.
 *	This association is made prior to open to avoid recursion issues.
 *
 *   4) From ddi_assoc_queue_with_devi to change vnode association as part of
 *	DL_ATTACH/DL_DETACH processing (SDIPSET already set).  The call
 *	from ddi_assoc_queue_with_devi may specify a NULL dip.
 *
 * We put an extra hold on the devinfo node passed in as we establish it as
 * the new s_dip pointer.  Any hold associated with the prior s_dip pointer
 * is released. The new hold will stay active until another call to
 * spec_assoc_vp_with_devi or until the common snode is destroyed by
 * spec_inactive after the last VN_RELE of the common node. This devinfo hold
 * transfers across a clone open except in the clone_dev case, where the clone
 * driver is no longer required after open.
 *
 * When SDIPSET is set and s_dip is NULL, the vnode has an association with
 * the driver even though there is currently no association with a specific
 * hardware instance.
 */
void
spec_assoc_vp_with_devi(struct vnode *vp, dev_info_t *dip)
{
	struct snode	*csp;
	dev_info_t	*olddip;

	ASSERT(vp);

	/*
	 * Don't establish a NULL association for a vnode associated with the
	 * clone driver.  The qassociate(, -1) call from a streams driver's
	 * open implementation to indicate support for qassociate has the
	 * side-effect of this type of spec_assoc_vp_with_devi call. This
	 * call should not change the association of the pre-clone
	 * vnode associated with the clone driver; the post-clone newdev
	 * association will be established later by spec_clone().
	 */
	if ((dip == NULL) && (getmajor(vp->v_rdev) == clone_major))
		return;

	/* hold the new */
	if (dip)
		e_ddi_hold_devi(dip);

	csp = VTOS(VTOS(vp)->s_commonvp);
	mutex_enter(&csp->s_lock);
	olddip = csp->s_dip;
	csp->s_dip = dip;
	csp->s_flag |= SDIPSET;

	/* If association changes then invalidate cached size */
	if (olddip != dip)
		csp->s_flag &= ~SSIZEVALID;
	mutex_exit(&csp->s_lock);

	spec_assoc_fence(dip, vp);

	/* release the old */
	if (olddip)
		ddi_release_devi(olddip);
}
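
/*
 * Illustrative sketch (not part of the original file): a dacf post-attach
 * handler might bind a makespecvp() vnode to the instance it just
 * configured, before any open, roughly as follows (name hypothetical).
 */
#if 0
static struct vnode *
example_postattach_bind(dev_info_t *dip, dev_t dev)
{
	struct vnode *vp = makespecvp(dev, VCHR);

	/* takes a new hold on dip; any hold on a prior s_dip is dropped */
	spec_assoc_vp_with_devi(vp, dip);
	return (vp);
}
#endif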

/*
 * Return the held dip associated with the specified snode.
 */
dev_info_t *
spec_hold_devi_by_vp(struct vnode *vp)
{
	struct snode	*csp;
	dev_info_t	*dip;

	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	csp = VTOS(VTOS(vp)->s_commonvp);
	dip = csp->s_dip;
	if (dip)
		e_ddi_hold_devi(dip);
	return (dip);
}
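
/*
 * Illustrative sketch (not part of the original file): because the dip
 * returned above is held, callers pair it with ddi_release_devi().  The
 * query function below is hypothetical.
 */
#if 0
static int
example_instance_of(struct vnode *vp)
{
	dev_info_t *dip;
	int inst = -1;

	if ((dip = spec_hold_devi_by_vp(vp)) != NULL) {
		inst = ddi_get_instance(dip);
		ddi_release_devi(dip);	/* drop the hold taken above */
	}
	return (inst);
}
#endif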

/*
 * Find a special vnode that refers to the given device
 * of the given type.  Never return a "common" vnode.
 * Return NULL if a special vnode does not exist.
 * HOLD the vnode before returning it.
 */
struct vnode *
specfind(dev_t dev, vtype_t type)
{
	struct snode *st;
	struct vnode *nvp;

	mutex_enter(&stable_lock);
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		if (st->s_dev == dev) {
			nvp = STOV(st);
			if (nvp->v_type == type && st->s_commonvp != nvp) {
				VN_HOLD(nvp);
				mutex_exit(&stable_lock);
				return (nvp);
			}
		}
		st = st->s_next;
	}
	mutex_exit(&stable_lock);
	return (NULL);
}

/*
 * Loop through the snode cache looking for snodes referencing dip.
 *
 * This function determines if a devinfo node is "BUSY" from the perspective
 * of having an active vnode associated with the device, which represents a
 * dependency on the device's services.  This function is needed because a
 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
 * for instance, the framework is manipulating the node (has an open
 * ndi_hold_devi).
 *
 * Returns:
 *	DEVI_REFERENCED		- if dip is referenced
 *	DEVI_NOT_REFERENCED	- if dip is not referenced
 */
int
devi_stillreferenced(dev_info_t *dip)
{
	struct snode	*sp;
	int		i;

	/* if no hold then there can't be an snode with s_dip == dip */
	if (e_ddi_devi_holdcnt(dip) == 0)
		return (DEVI_NOT_REFERENCED);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			if (sp->s_dip == dip) {
				mutex_exit(&stable_lock);
				return (DEVI_REFERENCED);
			}
		}
	}
	mutex_exit(&stable_lock);
	return (DEVI_NOT_REFERENCED);
}
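
/*
 * Illustrative sketch (not part of the original file): retire-style logic
 * could use devi_stillreferenced() to decide whether tearing down an
 * instance is safe; the surrounding policy is hypothetical.
 */
#if 0
static int
example_safe_to_detach(dev_info_t *dip)
{
	return (devi_stillreferenced(dip) == DEVI_NOT_REFERENCED);
}
#endif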

/*
 * Given an snode, returns the open count and the dip associated with
 * that snode.  Assumes the caller holds the appropriate locks to
 * prevent the snode and/or dip from going away.
 * Returns:
 *	-1	No associated dip
 *	>= 0	Number of opens.
 */
int
spec_devi_open_count(struct snode *sp, dev_info_t **dipp)
{
	dev_info_t *dip;
	uint_t count;
	struct vnode *vp;

	ASSERT(sp);
	ASSERT(dipp);

	vp = STOV(sp);

	*dipp = NULL;

	/*
	 * We are only interested in common snodes. Only common snodes
	 * get their s_count fields bumped up on opens.
	 */
	if (sp->s_commonvp != vp || (dip = sp->s_dip) == NULL)
		return (-1);

	mutex_enter(&sp->s_lock);
	count = sp->s_count + sp->s_mapcnt;
	if (sp->s_flag & SLOCKED)
		count++;
	mutex_exit(&sp->s_lock);

	*dipp = dip;

	return (count);
}

/*
 * Given a device vnode, return the common
 * vnode associated with it.
 */
struct vnode *
common_specvp(struct vnode *vp)
{
	struct snode *sp;

	if (((vp->v_type != VBLK) && (vp->v_type != VCHR)) ||
	    !vn_matchops(vp, spec_getvnodeops()))
		return (vp);
	sp = VTOS(vp);
	return (sp->s_commonvp);
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.
 * Similar to commonvp() but doesn't acquire the stable_lock, and
 * may use a pre-allocated snode provided by the caller.
 */
static struct vnode *
get_cvp(
	dev_t		dev,
	vtype_t		type,
	struct snode	*nsp,		/* pre-allocated snode */
	int		*used_nsp)	/* flag indicating if we use nsp */
{
	struct snode *sp;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		*used_nsp = 1;		/* return value */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
	} else
		*used_nsp = 0;
	return (STOV(sp));
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.  For use ONLY by SPECFS.
 */
struct vnode *
commonvp(dev_t dev, vtype_t type)
{
	struct snode *sp, *nsp;
	struct vnode *svp;

	/* Pre-allocate snode in case we might block */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
	} else {
		mutex_exit(&stable_lock);
		/* Didn't need the pre-allocated snode */
		kmem_cache_free(snode_cache, nsp);
	}
	return (STOV(sp));
}

/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 */
struct snode *stable[STABLESIZE];
int		stablesz = STABLESIZE;
kmutex_t	stable_lock;

/*
 * Put a snode in the table.
 */
static void
sinsert(struct snode *sp)
{
	ASSERT(MUTEX_HELD(&stable_lock));
	sp->s_next = stable[STABLEHASH(sp->s_dev)];
	stable[STABLEHASH(sp->s_dev)] = sp;
}

/*
 * Remove an snode from the hash table.
 * The realvp is not released here because spec_inactive() still
 * needs it to do a spec_fsync().
 */
void
sdelete(struct snode *sp)
{
	struct snode *st;
	struct snode *stprev = NULL;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(sp->s_dev)];
	while (st != NULL) {
		if (st == sp) {
			if (stprev == NULL)
				stable[STABLEHASH(sp->s_dev)] = st->s_next;
			else
				stprev->s_next = st->s_next;
			break;
		}
		stprev = st;
		st = st->s_next;
	}
}

/*
 * Lookup an snode by <dev, type, vp>.
 * ONLY looks for snodes with non-NULL s_realvp members and
 * common snodes (with s_commonvp pointing to its vnode).
 *
 * If vp is NULL, only return commonvp. Otherwise return
 * shadow vp with both shadow and common vp's VN_HELD.
 */
static struct snode *
sfind(
	dev_t	dev,
	vtype_t	type,
	struct vnode *vp)
{
	struct snode *st;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		svp = STOV(st);
		if (st->s_dev == dev && svp->v_type == type &&
		    VN_CMP(st->s_realvp, vp) &&
		    (vp != NULL || st->s_commonvp == svp) &&
		    (vp == NULL || st->s_realvp->v_vfsp == vp->v_vfsp)) {
			VN_HOLD(svp);
			return (st);
		}
		st = st->s_next;
	}
	return (NULL);
}

/*
 * Mark the accessed, updated, or changed times in an snode
 * with the current time.
 */
void
smark(struct snode *sp, int flag)
{
	time_t	now = gethrestime_sec();

	/* check for change to avoid unnecessary locking */
	ASSERT((flag & ~(SACC|SUPD|SCHG)) == 0);
	if (((flag & sp->s_flag) != flag) ||
	    ((flag & SACC) && (sp->s_atime != now)) ||
	    ((flag & SUPD) && (sp->s_mtime != now)) ||
	    ((flag & SCHG) && (sp->s_ctime != now))) {
		/* lock and update */
		mutex_enter(&sp->s_lock);
		sp->s_flag |= flag;
		if (flag & SACC)
			sp->s_atime = now;
		if (flag & SUPD)
			sp->s_mtime = now;
		if (flag & SCHG)
			sp->s_ctime = now;
		mutex_exit(&sp->s_lock);
	}
}
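
/*
 * Illustrative sketch (not part of the original file): I/O paths mark the
 * snode times with smark() after an operation succeeds.  The wrapper
 * functions below are hypothetical.
 */
#if 0
static void
example_post_read(struct snode *sp)
{
	smark(sp, SACC);		/* access time only */
}

static void
example_post_write(struct snode *sp)
{
	smark(sp, SUPD|SCHG);		/* modification and change times */
}
#endif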

/*
 * Return the maximum file offset permitted for this device.
 * -1 means unrestricted.  SLOFFSET is associated with D_64BIT.
 *
 * On a 32-bit kernel this will limit:
 *   o	D_64BIT devices to SPEC_MAXOFFSET_T.
 *   o	non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
 */
offset_t
spec_maxoffset(struct vnode *vp)
{
	struct snode *sp = VTOS(vp);
	struct snode *csp = VTOS(sp->s_commonvp);

	if (STREAMSTAB(getmajor(sp->s_dev)))
		return ((offset_t)-1);
	else if (csp->s_flag & SANYOFFSET)	/* D_U64BIT */
		return ((offset_t)-1);
#ifdef _ILP32
	if (csp->s_flag & SLOFFSET)		/* D_64BIT */
		return (SPEC_MAXOFFSET_T);
#endif	/* _ILP32 */
	return (MAXOFF_T);
}
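
/*
 * Illustrative sketch (not part of the original file): a read/write path
 * could validate a caller-supplied offset against the limit computed
 * above.  The check function below is hypothetical.
 */
#if 0
static int
example_check_offset(struct vnode *vp, offset_t off)
{
	offset_t maxoff = spec_maxoffset(vp);

	if (maxoff != (offset_t)-1 && off > maxoff)
		return (EINVAL);	/* beyond what this device permits */
	return (0);
}
#endif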

/*ARGSUSED*/
static int
snode_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct snode *sp = buf;
	struct vnode *vp;

	vp = sp->s_vnode = vn_alloc(kmflags);
	if (vp == NULL) {
		return (-1);
	}
	vn_setops(vp, spec_getvnodeops());
	vp->v_data = sp;

	mutex_init(&sp->s_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sp->s_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
snode_destructor(void *buf, void *cdrarg)
{
	struct snode *sp = buf;
	struct vnode *vp = STOV(sp);

	mutex_destroy(&sp->s_lock);
	cv_destroy(&sp->s_cv);

	vn_free(vp);
}


int
specinit(int fstype, char *name)
{
	static const fs_operation_def_t spec_vfsops_template[] = {
		VFSNAME_SYNC, { .vfs_sync = spec_sync },
		NULL, NULL
	};
	extern struct vnodeops *spec_vnodeops;
	extern const fs_operation_def_t spec_vnodeops_template[];
	struct vfsops *spec_vfsops;
	int error;
	dev_t dev;

	/*
	 * Associate vfs and vnode operations.
	 */
	error = vfs_setfsops(fstype, spec_vfsops_template, &spec_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "specinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, spec_vnodeops_template, &spec_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "specinit: bad vnode ops template");
		return (error);
	}

	mutex_init(&stable_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spec_syncbusy, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Create snode cache
	 */
	snode_cache = kmem_cache_create("snode_cache", sizeof (struct snode),
	    0, snode_constructor, snode_destructor, NULL, NULL, NULL, 0);

	/*
	 * Associate vfs operations with spec_vfs
	 */
	VFS_INIT(&spec_vfs, spec_vfsops, (caddr_t)NULL);
	if ((dev = getudev()) == -1)
		dev = 0;
	specdev = makedevice(dev, 0);
	return (0);
}

int
device_close(struct vnode *vp, int flag, struct cred *cr)
{
	struct snode *sp = VTOS(vp);
	enum vtype type = vp->v_type;
	struct vnode *cvp;
	dev_t dev;
	int error = 0;	/* stream may already be closed in the VCHR case */

	dev = sp->s_dev;
	cvp = sp->s_commonvp;

	switch (type) {

	case VCHR:
		if (STREAMSTAB(getmajor(dev))) {
			if (cvp->v_stream != NULL)
				error = strclose(cvp, flag, cr);
			vp->v_stream = NULL;
		} else
			error = dev_close(dev, flag, OTYP_CHR, cr);
		break;

	case VBLK:
		/*
		 * On the last close of a block device we must
		 * invalidate any in-core blocks so that we
		 * can, for example, change floppy disks.
		 */
		(void) spec_putpage(cvp, (offset_t)0,
		    (size_t)0, B_INVAL|B_FORCE, cr, NULL);
		bflush(dev);
		binval(dev);
		error = dev_close(dev, flag, OTYP_BLK, cr);
		break;
	default:
		panic("device_close: not a device");
		/*NOTREACHED*/
	}

	return (error);
}

struct vnode *
makectty(vnode_t *ovp)
{
	vnode_t *vp;

	if ((vp = makespecvp(ovp->v_rdev, VCHR)) != NULL) {
		struct snode *sp;
		struct snode *csp;
		struct vnode *cvp;

		sp = VTOS(vp);
		cvp = sp->s_commonvp;
		csp = VTOS(cvp);
		mutex_enter(&csp->s_lock);
		csp->s_count++;
		mutex_exit(&csp->s_lock);
	}

	return (vp);
}

void
spec_snode_walk(int (*callback)(struct snode *sp, void *arg), void *arg)
{
	struct snode	*sp;
	int		i;

	ASSERT(callback);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp; sp = sp->s_next) {
			if (callback(sp, arg) != DDI_WALK_CONTINUE)
				goto out;
		}
	}
out:
	mutex_exit(&stable_lock);
}
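
/*
 * Illustrative sketch (not part of the original file): a walk callback
 * that tallies opens of common snodes bound to a given dip, built on
 * spec_devi_open_count() above.  The struct and functions below are
 * hypothetical.
 */
#if 0
struct example_count_arg {
	dev_info_t	*dip;
	int		opens;
};

static int
example_count_cb(struct snode *sp, void *arg)
{
	struct example_count_arg *ca = arg;
	dev_info_t *dip;
	int n;

	/* dip is only valid when the count is non-negative */
	if ((n = spec_devi_open_count(sp, &dip)) >= 0 && dip == ca->dip)
		ca->opens += n;
	return (DDI_WALK_CONTINUE);	/* visit every snode in the table */
}

static int
example_count_opens(dev_info_t *dip)
{
	struct example_count_arg ca = { dip, 0 };

	spec_snode_walk(example_count_cb, &ca);
	return (ca.opens);
}
#endif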

int
spec_is_clone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SCLONE) ? 1 : 0);
	}

	return (0);
}

int
spec_is_selfclone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SSELFCLONE) ? 1 : 0);
	}

	return (0);
}

/*
 * We may be invoked with a NULL vp, in which case we fence off
 * all snodes associated with the dip.
 */
int
spec_fence_snode(dev_info_t *dip, struct vnode *vp)
{
	struct snode	*sp;
	struct snode	*csp;
	int		retired;
	int		i;
	char		*path;
	int		emitted;

	ASSERT(dip);

	retired = 0;
	mutex_enter(&DEVI(dip)->devi_lock);
	if (DEVI(dip)->devi_flags & DEVI_RETIRED)
		retired = 1;
	mutex_exit(&DEVI(dip)->devi_lock);

	if (!retired)
		return (0);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	if (vp != NULL) {
		ASSERT(vn_matchops(vp, spec_getvnodeops()));
		csp = VTOCS(vp);
		ASSERT(csp);
		mutex_enter(&csp->s_lock);
		csp->s_flag |= SFENCED;
		mutex_exit(&csp->s_lock);
		FENDBG((CE_NOTE, "fenced off snode(%p) for dip: %s",
		    (void *)csp, path));
		kmem_free(path, MAXPATHLEN);
		return (0);
	}

	emitted = 0;
	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			ASSERT(sp->s_commonvp);
			csp = VTOS(sp->s_commonvp);
			if (csp->s_dip == dip) {
				/* fence off the common snode */
				mutex_enter(&csp->s_lock);
				csp->s_flag |= SFENCED;
				mutex_exit(&csp->s_lock);
				if (!emitted) {
					FENDBG((CE_NOTE, "fenced 1 of N"));
					emitted++;
				}
			}
		}
	}
	mutex_exit(&stable_lock);

	FENDBG((CE_NOTE, "fenced off all snodes for dip: %s", path));
	kmem_free(path, MAXPATHLEN);

	return (0);
}

int
spec_unfence_snode(dev_info_t *dip)
{
	struct snode	*sp;
	struct snode	*csp;
	int		i;
	char		*path;
	int		emitted;

	ASSERT(dip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	emitted = 0;
	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			ASSERT(sp->s_commonvp);
			csp = VTOS(sp->s_commonvp);
			ASSERT(csp);
			if (csp->s_dip == dip) {
				/* unfence the common snode */
				mutex_enter(&csp->s_lock);
				csp->s_flag &= ~SFENCED;
				mutex_exit(&csp->s_lock);
				if (!emitted) {
					FENDBG((CE_NOTE, "unfenced 1 of N"));
					emitted++;
				}
			}
		}
	}
	mutex_exit(&stable_lock);

	FENDBG((CE_NOTE, "unfenced all snodes for dip: %s", path));
	kmem_free(path, MAXPATHLEN);

	return (0);
}

void
spec_size_invalidate(dev_t dev, vtype_t type)
{
	struct snode *csp;

	mutex_enter(&stable_lock);
	if ((csp = sfind(dev, type, NULL)) != NULL) {
		mutex_enter(&csp->s_lock);
		csp->s_flag &= ~SSIZEVALID;
		VN_RELE(STOV(csp));
		mutex_exit(&csp->s_lock);
	}
	mutex_exit(&stable_lock);
}
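
/*
 * Illustrative sketch (not part of the original file): a driver that
 * detects a media change could drop the cached size so the next access
 * re-derives it from the device.  The hook below is hypothetical.
 */
#if 0
static void
example_media_changed(dev_t dev)
{
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}
#endif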
1069