/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/fs/fifonode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/autoconf.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>

struct vfs spec_vfs;
static dev_t specdev;
struct kmem_cache *snode_cache;

static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);
struct vnode *
specvp_devfs(
	struct vnode	*realvp,
	dev_t		dev,
	vtype_t		vtyp,
	struct cred	*cr,
	dev_info_t	*dip)
{
	struct vnode	*vp;

	ASSERT(realvp && dip);
	vp = specvp(realvp, dev, vtyp, cr);
	ASSERT(vp);

	/* associate a dip hold with the common snode's s_dip pointer */
	spec_assoc_vp_with_devi(vp, dip);
	return (vp);
}

/*
 * Return a shadow special vnode for the given dev.
 * If no snode exists for this dev, create one and put it
 * in a table hashed by <dev, realvp>.  If the snode for
 * this dev is already in the table, return it (its reference count
 * is incremented by sfind).  The snode will be flushed from the
 * table when spec_inactive() calls sdelete().
 *
 * The fsid is inherited from the real vnode so that clones
 * can be found.
 */
struct vnode *
specvp(
	struct vnode	*vp,
	dev_t		dev,
	vtype_t		type,
	struct cred	*cr)
{
	struct snode *sp;
	struct snode *nsp;
	struct snode *csp;
	struct vnode *svp;
	struct vattr va;
	int	rc;
	int	used_csp = 0;		/* Did we use pre-allocated csp */

	if (vp == NULL)
		return (NULL);
	if (vp->v_type == VFIFO)
		return (fifovp(vp, cr));

	ASSERT(vp->v_type == type);
	ASSERT(vp->v_rdev == dev);

	/*
	 * Pre-allocate snodes before holding any locks in case we block
	 */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	csp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	/*
	 * Get the time attributes outside of the stable lock since
	 * this operation may block. Unfortunately, it may not have
	 * been required if the snode is in the cache.
	 */
	va.va_mask = AT_FSID | AT_TIMES;
	rc = VOP_GETATTR(vp, &va, 0, cr);	/* XXX may block! */

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, vp)) == NULL) {
		struct vnode *cvp;

		sp = nsp;	/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp	= vp;
		VN_HOLD(vp);
		sp->s_commonvp	= NULL;
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= 0;
		sp->s_flag	= 0;
		if (rc == 0) {
			/*
			 * Set times in snode to those in the vnode.
			 */
			sp->s_fsid = va.va_fsid;
			sp->s_atime = va.va_atime.tv_sec;
			sp->s_mtime = va.va_mtime.tv_sec;
			sp->s_ctime = va.va_ctime.tv_sec;
		} else {
			sp->s_fsid = specdev;
			sp->s_atime = 0;
			sp->s_mtime = 0;
			sp->s_ctime = 0;
		}
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_flag	= (vp->v_flag & VROOT);
		svp->v_vfsp	= vp->v_vfsp;
		VFS_HOLD(svp->v_vfsp);
		svp->v_type	= type;
		svp->v_rdev	= dev;
		(void) vn_copypath(vp, svp);
		if (type == VBLK || type == VCHR) {
			cvp = get_cvp(dev, type, csp, &used_csp);
			svp->v_stream = cvp->v_stream;

			sp->s_commonvp = cvp;
		}
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
		if (used_csp == 0) {
			/* Didn't use pre-allocated snode so free it */
			kmem_cache_free(snode_cache, csp);
		}
	} else {
		mutex_exit(&stable_lock);
		/* free unused snode memory */
		kmem_cache_free(snode_cache, nsp);
		kmem_cache_free(snode_cache, csp);
	}
	return (STOV(sp));
}
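
/*
 * Usage sketch (illustrative, not a real caller): a file system that
 * encounters a device special file during lookup typically swaps in
 * the shadow vnode returned here.  Assuming a held vnode "vp" of type
 * VBLK or VCHR:
 *
 *	newvp = specvp(vp, vp->v_rdev, vp->v_type, cr);
 *	VN_RELE(vp);			specvp() took its own hold
 *	return (newvp);
 *
 * "newvp" is an invented name; real callers live in the individual
 * file systems and in devfs.
 */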

/*
 * Return a special vnode for the given dev; no vnode is supplied
 * for it to shadow.  Always create a new snode and put it in the
 * table hashed by <dev, NULL>.  The snode will be flushed from the
 * table when spec_inactive() calls sdelete().  The association of
 * this node with an attached instance of hardware is not made until
 * spec_open time.
 *
 * N.B. Assumes caller takes on responsibility of making sure no one
 * else is creating a snode for (dev, type) at this time.
 */
struct vnode *
makespecvp(dev_t dev, vtype_t type)
{
	struct snode *sp;
	struct vnode *svp, *cvp;
	time_t now;

	sp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	svp = STOV(sp);
	cvp = commonvp(dev, type);
	now = gethrestime_sec();

	sp->s_realvp	= NULL;
	sp->s_commonvp	= cvp;
	sp->s_dev	= dev;
	sp->s_dip	= NULL;
	sp->s_nextr	= NULL;
	sp->s_list	= NULL;
	sp->s_plcy	= NULL;
	sp->s_size	= 0;
	sp->s_flag	= 0;
	sp->s_fsid	= specdev;
	sp->s_atime	= now;
	sp->s_mtime	= now;
	sp->s_ctime	= now;
	sp->s_count	= 0;
	sp->s_mapcnt	= 0;

	vn_reinit(svp);
	svp->v_vfsp	= &spec_vfs;
	svp->v_stream	= cvp->v_stream;
	svp->v_type	= type;
	svp->v_rdev	= dev;

	vn_exists(svp);
	mutex_enter(&stable_lock);
	sinsert(sp);
	mutex_exit(&stable_lock);

	return (svp);
}
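
/*
 * Usage sketch: makectty() at the bottom of this file uses this
 * routine to manufacture a VCHR vnode for a controlling terminal from
 * a bare dev_t.  A hypothetical caller honoring the N.B. above might
 * do:
 *
 *	vp = makespecvp(makedevice(maj, min), VCHR);
 *
 * where "maj" and "min" are illustrative placeholders for a major and
 * minor number the caller already owns.
 */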

/*
 * Associate the common snode with a devinfo node.  This is called from:
 *
 *   1) specvp_devfs to associate a specfs node with the dip attached
 *	by devfs.
 *
 *   2) spec_open after path reconstruction and attach.
 *
 *   3) From dacf processing to associate a makespecvp node with
 *	the dip that dacf postattach processing is being performed on.
 *	This association is made prior to open to avoid recursion issues.
 *
 *   4) From ddi_assoc_queue_with_devi to change vnode association as part of
 *	DL_ATTACH/DL_DETACH processing (SDIPSET already set).  The call
 *	from ddi_assoc_queue_with_devi may specify a NULL dip.
 *
 * We put an extra hold on the devinfo node passed in as we establish it as
 * the new s_dip pointer.  Any hold associated with the prior s_dip pointer
 * is released. The new hold will stay active until another call to
 * spec_assoc_vp_with_devi or until the common snode is destroyed by
 * spec_inactive after the last VN_RELE of the common node. This devinfo hold
 * transfers across a clone open except in the clone_dev case, where the clone
 * driver is no longer required after open.
 *
 * When SDIPSET is set and s_dip is NULL, the vnode has an association with
 * the driver even though there is currently no association with a specific
 * hardware instance.
 */
void
spec_assoc_vp_with_devi(struct vnode *vp, dev_info_t *dip)
{
	struct snode	*csp;
	dev_info_t	*olddip;

	ASSERT(vp);

	/*
	 * Don't establish a NULL association for a vnode associated with the
	 * clone driver.  The qassociate(, -1) call from a streams driver's
	 * open implementation to indicate support for qassociate has the
	 * side-effect of this type of spec_assoc_vp_with_devi call. This
	 * call should not change the association of the pre-clone
	 * vnode associated with the clone driver; the post-clone newdev
	 * association will be established later by spec_clone().
	 */
	if ((dip == NULL) && (getmajor(vp->v_rdev) == clone_major))
		return;

	/* hold the new */
	if (dip)
		e_ddi_hold_devi(dip);

	csp = VTOS(VTOS(vp)->s_commonvp);
	mutex_enter(&csp->s_lock);
	olddip = csp->s_dip;
	csp->s_dip = dip;
	csp->s_flag |= SDIPSET;

	/* If association changes then invalidate cached size */
	if (olddip != dip)
		csp->s_flag &= ~SSIZEVALID;
	mutex_exit(&csp->s_lock);

	/* release the old */
	if (olddip)
		ddi_release_devi(olddip);
}
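
/*
 * Sketch of the qassociate case described above (hypothetical STREAMS
 * driver, based on the qassociate(9F) contract): advertising
 * qassociate support from open is what indirectly produces the
 * NULL-dip call handled by the early return in
 * spec_assoc_vp_with_devi() above:
 *
 *	static int
 *	xx_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *cr)
 *	{
 *		(void) qassociate(q, -1);
 *		...
 *	}
 *
 * "xx_open" is an invented name.
 */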

/*
 * Return the held dip associated with the specified snode.
 */
dev_info_t *
spec_hold_devi_by_vp(struct vnode *vp)
{
	struct snode	*csp;
	dev_info_t	*dip;

	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	csp = VTOS(VTOS(vp)->s_commonvp);
	dip = csp->s_dip;
	if (dip)
		e_ddi_hold_devi(dip);
	return (dip);
}

/*
 * Find a special vnode that refers to the given device
 * of the given type.  Never return a "common" vnode.
 * Return NULL if a special vnode does not exist.
 * HOLD the vnode before returning it.
 */
struct vnode *
specfind(dev_t dev, vtype_t type)
{
	struct snode *st;
	struct vnode *nvp;

	mutex_enter(&stable_lock);
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		if (st->s_dev == dev) {
			nvp = STOV(st);
			if (nvp->v_type == type && st->s_commonvp != nvp) {
				VN_HOLD(nvp);
				mutex_exit(&stable_lock);
				return (nvp);
			}
		}
		st = st->s_next;
	}
	mutex_exit(&stable_lock);
	return (NULL);
}
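
/*
 * Usage sketch (illustrative only): given a driver's major number in
 * "maj", look for an existing shadow vnode for minor 0:
 *
 *	vnode_t *vp = specfind(makedevice(maj, 0), VCHR);
 *
 *	if (vp != NULL) {
 *		... use the vnode ...
 *		VN_RELE(vp);		drop the hold taken above
 *	}
 */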

/*
 * Loop through the snode cache looking for snodes referencing dip.
 *
 * This function determines if a devinfo node is "BUSY" from the perspective
 * of having an active vnode associated with the device, which represents a
 * dependency on the device's services.  This function is needed because a
 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
 * for instance, the framework is manipulating the node (has an open
 * ndi_hold_devi).
 *
 * Returns:
 *	DEVI_REFERENCED		- if dip is referenced
 *	DEVI_NOT_REFERENCED	- if dip is not referenced
 */
int
devi_stillreferenced(dev_info_t *dip)
{
	struct snode	*sp;
	int		i;

	/* if no hold then there can't be an snode with s_dip == dip */
	if (e_ddi_devi_holdcnt(dip) == 0)
		return (DEVI_NOT_REFERENCED);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			if (sp->s_dip == dip) {
				mutex_exit(&stable_lock);
				return (DEVI_REFERENCED);
			}
		}
	}
	mutex_exit(&stable_lock);
	return (DEVI_NOT_REFERENCED);
}

/*
 * Given an snode, returns the open count and the dip
 * associated with that snode.
 * Assumes the caller holds the appropriate locks
 * to prevent the snode and/or dip from going away.
 * Returns:
 *	-1	No associated dip
 *	>= 0	Number of opens.
 */
int
spec_devi_open_count(struct snode *sp, dev_info_t **dipp)
{
	dev_info_t *dip;
	uint_t count;
	struct vnode *vp;

	ASSERT(sp);
	ASSERT(dipp);

	vp = STOV(sp);

	*dipp = NULL;

	/*
	 * We are only interested in common snodes. Only common snodes
	 * get their s_count fields bumped up on opens.
	 */
	if (sp->s_commonvp != vp || (dip = sp->s_dip) == NULL)
		return (-1);

	mutex_enter(&sp->s_lock);
	count = sp->s_count + sp->s_mapcnt;
	if (sp->s_flag & SLOCKED)
		count++;
	mutex_exit(&sp->s_lock);

	*dipp = dip;

	return (count);
}

/*
 * Given a device vnode, return the common
 * vnode associated with it.
 */
struct vnode *
common_specvp(struct vnode *vp)
{
	struct snode *sp;

	if (((vp->v_type != VBLK) && (vp->v_type != VCHR)) ||
	    !vn_matchops(vp, spec_getvnodeops()))
		return (vp);
	sp = VTOS(vp);
	return (sp->s_commonvp);
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.
 * Similar to commonvp() but doesn't acquire the stable_lock, and
 * may use a pre-allocated snode provided by caller.
 */
static struct vnode *
get_cvp(
	dev_t		dev,
	vtype_t		type,
	struct snode	*nsp,		/* pre-allocated snode */
	int		*used_nsp)	/* flag indicating if we use nsp */
{
	struct snode *sp;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		*used_nsp = 1;		/* return value */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
	} else
		*used_nsp = 0;
	return (STOV(sp));
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.  For use ONLY by SPECFS.
 */
struct vnode *
commonvp(dev_t dev, vtype_t type)
{
	struct snode *sp, *nsp;
	struct vnode *svp;

	/* Pre-allocate snode in case we might block */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
	} else {
		mutex_exit(&stable_lock);
		/* Didn't need the pre-allocated snode */
		kmem_cache_free(snode_cache, nsp);
	}
	return (STOV(sp));
}

/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 */
struct snode *stable[STABLESIZE];
int		stablesz = STABLESIZE;
kmutex_t	stable_lock;
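
/*
 * Bucket selection sketch: STABLEHASH() comes from sys/fs/snode.h and
 * folds the major and minor numbers into an index modulo the table
 * size (see the header for the exact form).  Illustratively, major
 * 13/minor 2 and major 2/minor 13 can land in the same bucket;
 * collisions are resolved by the full s_dev (and type) comparisons in
 * sfind() and specfind().
 */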

/*
 * Put a snode in the table.
 */
static void
sinsert(struct snode *sp)
{
	ASSERT(MUTEX_HELD(&stable_lock));
	sp->s_next = stable[STABLEHASH(sp->s_dev)];
	stable[STABLEHASH(sp->s_dev)] = sp;
}

/*
 * Remove an snode from the hash table.
 * The realvp is not released here because spec_inactive() still
 * needs it to do a spec_fsync().
 */
void
sdelete(struct snode *sp)
{
	struct snode *st;
	struct snode *stprev = NULL;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(sp->s_dev)];
	while (st != NULL) {
		if (st == sp) {
			if (stprev == NULL)
				stable[STABLEHASH(sp->s_dev)] = st->s_next;
			else
				stprev->s_next = st->s_next;
			break;
		}
		stprev = st;
		st = st->s_next;
	}
}

/*
 * Lookup an snode by <dev, type, vp>.
 * ONLY looks for snodes with non-NULL s_realvp members and
 * common snodes (with s_commonvp pointing to its vnode).
 *
 * If vp is NULL, only return commonvp. Otherwise return
 * shadow vp with both shadow and common vp's VN_HELD.
 */
static struct snode *
sfind(
	dev_t	dev,
	vtype_t	type,
	struct vnode *vp)
{
	struct snode *st;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		svp = STOV(st);
		if (st->s_dev == dev && svp->v_type == type &&
		    VN_CMP(st->s_realvp, vp) &&
		    (vp != NULL || st->s_commonvp == svp) &&
		    (vp == NULL || st->s_realvp->v_vfsp == vp->v_vfsp)) {
			VN_HOLD(svp);
			return (st);
		}
		st = st->s_next;
	}
	return (NULL);
}

/*
 * Mark the accessed, updated, or changed times in an snode
 * with the current time.
 */
void
smark(struct snode *sp, int flag)
{
	time_t	now = gethrestime_sec();

	/* check for change to avoid unnecessary locking */
	ASSERT((flag & ~(SACC|SUPD|SCHG)) == 0);
	if (((flag & sp->s_flag) != flag) ||
	    ((flag & SACC) && (sp->s_atime != now)) ||
	    ((flag & SUPD) && (sp->s_mtime != now)) ||
	    ((flag & SCHG) && (sp->s_ctime != now))) {
		/* lock and update */
		mutex_enter(&sp->s_lock);
		sp->s_flag |= flag;
		if (flag & SACC)
			sp->s_atime = now;
		if (flag & SUPD)
			sp->s_mtime = now;
		if (flag & SCHG)
			sp->s_ctime = now;
		mutex_exit(&sp->s_lock);
	}
}
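
/*
 * Usage sketch: the specfs vnode operations pass the same flag
 * combinations an ordinary file system would, along the lines of
 * (illustrative):
 *
 *	smark(sp, SACC);		after a successful read
 *	smark(sp, SUPD|SCHG);		after a successful write
 */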

/*
 * Return the maximum file offset permitted for this device.
 * -1 means unrestricted.  SLOFFSET is associated with D_64BIT.
 *
 * On a 32-bit kernel this will limit:
 *   o	D_64BIT devices to SPEC_MAXOFFSET_T.
 *   o	non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
 */
offset_t
spec_maxoffset(struct vnode *vp)
{
	struct snode *sp = VTOS(vp);
	struct snode *csp = VTOS(sp->s_commonvp);

	if (STREAMSTAB(getmajor(sp->s_dev)))
		return ((offset_t)-1);
	else if (csp->s_flag & SANYOFFSET)	/* D_U64BIT */
		return ((offset_t)-1);
#ifdef _ILP32
	if (csp->s_flag & SLOFFSET)		/* D_64BIT */
		return (SPEC_MAXOFFSET_T);
#endif	/* _ILP32 */
	return (MAXOFF_T);
}

/*ARGSUSED*/
static int
snode_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct snode *sp = buf;
	struct vnode *vp;

	/* honor the caller's kmflags instead of always sleeping */
	vp = vn_alloc(kmflags);
	if (vp == NULL)
		return (-1);

	sp->s_vnode = vp;

	vn_setops(vp, spec_getvnodeops());
	vp->v_data = (caddr_t)sp;

	mutex_init(&sp->s_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sp->s_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
snode_destructor(void *buf, void *cdrarg)
{
	struct snode *sp = buf;
	struct vnode *vp = STOV(sp);

	mutex_destroy(&sp->s_lock);
	cv_destroy(&sp->s_cv);

	vn_free(vp);
}


int
specinit(int fstype, char *name)
{
	static const fs_operation_def_t spec_vfsops_template[] = {
		VFSNAME_SYNC, { .vfs_sync = spec_sync },
		NULL, NULL
	};
	extern struct vnodeops *spec_vnodeops;
	extern const fs_operation_def_t spec_vnodeops_template[];
	struct vfsops *spec_vfsops;
	int error;
	dev_t dev;

	/*
	 * Associate vfs and vnode operations.
	 */
	error = vfs_setfsops(fstype, spec_vfsops_template, &spec_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "specinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, spec_vnodeops_template, &spec_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "specinit: bad vnode ops template");
		return (error);
	}

	mutex_init(&stable_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spec_syncbusy, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Create snode cache
	 */
	snode_cache = kmem_cache_create("snode_cache", sizeof (struct snode),
	    0, snode_constructor, snode_destructor, NULL, NULL, NULL, 0);

	/*
	 * Associate vfs operations with spec_vfs
	 */
	VFS_INIT(&spec_vfs, spec_vfsops, (caddr_t)NULL);
	if ((dev = getudev()) == -1)
		dev = 0;
	specdev = makedevice(dev, 0);
	return (0);
}
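
/*
 * Registration sketch: specinit() is handed to the VFS framework as
 * the file system's init routine through a vfsdef_t entry, roughly
 * along these lines (see the specfs vfsops source for the
 * authoritative version):
 *
 *	static vfsdef_t vfw = {
 *		VFSDEF_VERSION,
 *		"specfs",
 *		specinit,
 *		0,
 *		NULL
 *	};
 */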

int
device_close(struct vnode *vp, int flag, struct cred *cr)
{
	struct snode *sp = VTOS(vp);
	enum vtype type = vp->v_type;
	struct vnode *cvp;
	dev_t dev;
	int error = 0;	/* may be left unset on the STREAMS path below */

	dev = sp->s_dev;
	cvp = sp->s_commonvp;

	switch (type) {

	case VCHR:
		if (STREAMSTAB(getmajor(dev))) {
			if (cvp->v_stream != NULL)
				error = strclose(cvp, flag, cr);
			vp->v_stream = NULL;
		} else
			error = dev_close(dev, flag, OTYP_CHR, cr);
		break;

	case VBLK:
		/*
		 * On the last close of a block device we must
		 * invalidate any in-core blocks so that we
		 * can, for example, change floppy disks.
		 */
		(void) spec_putpage(cvp, (offset_t)0,
		    (size_t)0, B_INVAL|B_FORCE, cr);
		bflush(dev);
		binval(dev);
		error = dev_close(dev, flag, OTYP_BLK, cr);
		break;
	default:
		panic("device_close: not a device");
		/*NOTREACHED*/
	}

	return (error);
}

struct vnode *
makectty(vnode_t *ovp)
{
	vnode_t *vp;

	if ((vp = makespecvp(ovp->v_rdev, VCHR)) != NULL) {
		struct snode *sp;
		struct snode *csp;
		struct vnode *cvp;

		sp = VTOS(vp);
		cvp = sp->s_commonvp;
		csp = VTOS(cvp);
		mutex_enter(&csp->s_lock);
		csp->s_count++;
		mutex_exit(&csp->s_lock);
	}

	return (vp);
}

void
spec_snode_walk(int (*callback)(struct snode *sp, void *arg), void *arg)
{
	struct snode	*sp;
	int		i;

	ASSERT(callback);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp; sp = sp->s_next) {
			if (callback(sp, arg) != DDI_WALK_CONTINUE)
				goto out;
		}
	}
out:
	mutex_exit(&stable_lock);
}
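
/*
 * Callback contract sketch (hypothetical caller): return
 * DDI_WALK_CONTINUE to keep walking; any other value stops the walk.
 * stable_lock is held across the callback, so it must not block or
 * re-enter the snode table.  For example:
 *
 *	static int
 *	count_snodes(struct snode *sp, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (DDI_WALK_CONTINUE);
 *	}
 *
 * "count_snodes" is an invented name.
 */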

int
spec_is_clone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SCLONE) ? 1 : 0);
	}

	return (0);
}

int
spec_is_selfclone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SSELFCLONE) ? 1 : 0);
	}

	return (0);
}