xref: /illumos-gate/usr/src/uts/common/fs/specfs/specsubr.c (revision c29070920f8cf285c4cfdd9281be9f0f4e697156)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/fs/fifonode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/autoconf.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>

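/*
 * File-scope specfs state: spec_vfs is the placeholder vfs used by snodes
 * that do not shadow a real vnode, specdev supplies the fsid for such
 * snodes, and snode_cache is the kmem cache all snodes are allocated from.
 */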
struct vfs spec_vfs;
static dev_t specdev;
struct kmem_cache *snode_cache;

static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);

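/*
 * Return a shadow specfs vnode for a devfs node: a thin wrapper around
 * specvp() that also associates the resulting common snode with the
 * devinfo node attached by devfs, taking the dip hold before the vnode
 * is handed back to the caller.
 */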
struct vnode *
specvp_devfs(
	struct vnode	*realvp,
	dev_t		dev,
	vtype_t		vtyp,
	struct cred	*cr,
	dev_info_t	*dip)
{
	struct vnode	*vp;

	ASSERT(realvp && dip);
	vp = specvp(realvp, dev, vtyp, cr);
	ASSERT(vp);

	/* associate a dip hold with the common snode's s_dip pointer */
	spec_assoc_vp_with_devi(vp, dip);
	return (vp);
}

/*
 * Return a shadow special vnode for the given dev.
 * If no snode exists for this dev create one and put it
 * in a table hashed by <dev, realvp>.  If the snode for
 * this dev is already in the table return it (ref count is
 * incremented by sfind).  The snode will be flushed from the
 * table when spec_inactive calls sdelete.
 *
 * The fsid is inherited from the real vnode so that clones
 * can be found.
 */
struct vnode *
specvp(
	struct vnode	*vp,
	dev_t		dev,
	vtype_t		type,
	struct cred	*cr)
{
	struct snode *sp;
	struct snode *nsp;
	struct snode *csp;
	struct vnode *svp;
	struct vattr va;
	int	rc;
	int	used_csp = 0;		/* Did we use pre-allocated csp */

	if (vp == NULL)
		return (NULL);
	if (vp->v_type == VFIFO)
		return (fifovp(vp, cr));

	ASSERT(vp->v_type == type);
	ASSERT(vp->v_rdev == dev);

	/*
	 * Pre-allocate snodes before holding any locks in case we block.
	 */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	csp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	/*
	 * Get the time attributes outside of the stable lock since
	 * this operation may block. Unfortunately, it may not have
	 * been required if the snode is in the cache.
	 */
	va.va_mask = AT_FSID | AT_TIMES;
	rc = VOP_GETATTR(vp, &va, 0, cr);	/* XXX may block! */

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, vp)) == NULL) {
		struct vnode *cvp;

		sp = nsp;	/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp	= vp;
		VN_HOLD(vp);
		sp->s_commonvp	= NULL;
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= 0;
		sp->s_flag	= 0;
		if (rc == 0) {
			/*
			 * Set times in snode to those in the vnode.
			 */
			sp->s_fsid = va.va_fsid;
			sp->s_atime = va.va_atime.tv_sec;
			sp->s_mtime = va.va_mtime.tv_sec;
			sp->s_ctime = va.va_ctime.tv_sec;
		} else {
			sp->s_fsid = specdev;
			sp->s_atime = 0;
			sp->s_mtime = 0;
			sp->s_ctime = 0;
		}
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_flag	= (vp->v_flag & VROOT);
		svp->v_vfsp	= vp->v_vfsp;
		VFS_HOLD(svp->v_vfsp);
		svp->v_type	= type;
		svp->v_rdev	= dev;
		(void) vn_copypath(vp, svp);
		if (type == VBLK || type == VCHR) {
			cvp = get_cvp(dev, type, csp, &used_csp);
			svp->v_stream = cvp->v_stream;

			sp->s_commonvp = cvp;
		}
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
		if (used_csp == 0) {
			/* Didn't use pre-allocated snode so free it */
			kmem_cache_free(snode_cache, csp);
		}
	} else {
		mutex_exit(&stable_lock);
		/* free unused snode memory */
		kmem_cache_free(snode_cache, nsp);
		kmem_cache_free(snode_cache, csp);
	}
	return (STOV(sp));
}

/*
 * Return a special vnode for the given dev; no vnode is supplied
 * for it to shadow.  Always create a new snode and put it in the
 * table hashed by <dev, NULL>.  The snode will be flushed from the
 * table when spec_inactive() calls sdelete().  The association of
 * this node with an attached instance of hardware is not made until
 * spec_open time.
 *
 * N.B. Assumes caller takes on responsibility of making sure no one
 * else is creating a snode for (dev, type) at this time.
 */
struct vnode *
makespecvp(dev_t dev, vtype_t type)
{
	struct snode *sp;
	struct vnode *svp, *cvp;
	time_t now;

	sp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	svp = STOV(sp);
	cvp = commonvp(dev, type);
	now = gethrestime_sec();

	sp->s_realvp	= NULL;
	sp->s_commonvp	= cvp;
	sp->s_dev	= dev;
	sp->s_dip	= NULL;
	sp->s_nextr	= NULL;
	sp->s_list	= NULL;
	sp->s_plcy	= NULL;
	sp->s_size	= 0;
	sp->s_flag	= 0;
	sp->s_fsid	= specdev;
	sp->s_atime	= now;
	sp->s_mtime	= now;
	sp->s_ctime	= now;
	sp->s_count	= 0;
	sp->s_mapcnt	= 0;

	vn_reinit(svp);
	svp->v_vfsp	= &spec_vfs;
	svp->v_stream	= cvp->v_stream;
	svp->v_type	= type;
	svp->v_rdev	= dev;

	vn_exists(svp);
	mutex_enter(&stable_lock);
	sinsert(sp);
	mutex_exit(&stable_lock);

	return (svp);
}

/*
 * Associate the common snode with a devinfo node.  This is called from:
 *
 *   1) specvp_devfs to associate a specfs node with the dip attached
 *	by devfs.
 *
 *   2) spec_open after path reconstruction and attach.
 *
 *   3) From dacf processing to associate a makespecvp node with
 *	the dip that dacf postattach processing is being performed on.
 *	This association is made prior to open to avoid recursion issues.
 *
 *   4) From ddi_assoc_queue_with_devi to change vnode association as part of
 *	DL_ATTACH/DL_DETACH processing (SDIPSET already set).  The call
 *	from ddi_assoc_queue_with_devi may specify a NULL dip.
 *
 * We put an extra hold on the devinfo node passed in as we establish it as
 * the new s_dip pointer.  Any hold associated with the prior s_dip pointer
 * is released. The new hold will stay active until another call to
 * spec_assoc_vp_with_devi or until the common snode is destroyed by
 * spec_inactive after the last VN_RELE of the common node. This devinfo hold
 * transfers across a clone open except in the clone_dev case, where the clone
 * driver is no longer required after open.
 *
 * When SDIPSET is set and s_dip is NULL, the vnode has an association with
 * the driver even though there is currently no association with a specific
 * hardware instance.
 */
void
spec_assoc_vp_with_devi(struct vnode *vp, dev_info_t *dip)
{
	struct snode	*csp;
	dev_info_t	*olddip;

	ASSERT(vp);

	/*
	 * Don't establish a NULL association for a vnode associated with the
	 * clone driver.  The qassociate(, -1) call from a streams driver's
	 * open implementation to indicate support for qassociate has the
	 * side-effect of this type of spec_assoc_vp_with_devi call. This
	 * call should not change the association of the pre-clone
	 * vnode associated with the clone driver; the post-clone newdev
	 * association will be established later by spec_clone().
	 */
	if ((dip == NULL) && (getmajor(vp->v_rdev) == clone_major))
		return;

	/* hold the new */
	if (dip)
		e_ddi_hold_devi(dip);

	csp = VTOS(VTOS(vp)->s_commonvp);
	mutex_enter(&csp->s_lock);
	olddip = csp->s_dip;
	csp->s_dip = dip;
	csp->s_flag |= SDIPSET;

	/* If association changes then invalidate cached size */
	if (olddip != dip)
		csp->s_flag &= ~SSIZEVALID;
	mutex_exit(&csp->s_lock);

	/* release the old */
	if (olddip)
		ddi_release_devi(olddip);
}

/*
 * Return the held dip associated with the specified snode.
 */
dev_info_t *
spec_hold_devi_by_vp(struct vnode *vp)
{
	struct snode	*csp;
	dev_info_t	*dip;

	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	csp = VTOS(VTOS(vp)->s_commonvp);
	dip = csp->s_dip;
	if (dip)
		e_ddi_hold_devi(dip);
	return (dip);
}

/*
 * Find a special vnode that refers to the given device
 * of the given type.  Never return a "common" vnode.
 * Return NULL if a special vnode does not exist.
 * HOLD the vnode before returning it.
 */
struct vnode *
specfind(dev_t dev, vtype_t type)
{
	struct snode *st;
	struct vnode *nvp;

	mutex_enter(&stable_lock);
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		if (st->s_dev == dev) {
			nvp = STOV(st);
			if (nvp->v_type == type && st->s_commonvp != nvp) {
				VN_HOLD(nvp);
				mutex_exit(&stable_lock);
				return (nvp);
			}
		}
		st = st->s_next;
	}
	mutex_exit(&stable_lock);
	return (NULL);
}

/*
 * Loop through the snode cache looking for snodes referencing dip.
 *
 * This function determines if a devinfo node is "BUSY" from the perspective
 * of having an active vnode associated with the device, which represents a
 * dependency on the device's services.  This function is needed because a
 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
 * for instance, the framework is manipulating the node (has an open
 * ndi_hold_devi).
 *
 * Returns:
 *	DEVI_REFERENCED		- if dip is referenced
 *	DEVI_NOT_REFERENCED	- if dip is not referenced
 */
int
devi_stillreferenced(dev_info_t *dip)
{
	struct snode	*sp;
	int		i;

	/* if no hold then there can't be an snode with s_dip == dip */
	if (e_ddi_devi_holdcnt(dip) == 0)
		return (DEVI_NOT_REFERENCED);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			if (sp->s_dip == dip) {
				mutex_exit(&stable_lock);
				return (DEVI_REFERENCED);
			}
		}
	}
	mutex_exit(&stable_lock);
	return (DEVI_NOT_REFERENCED);
}

/*
 * Given an snode, returns the open count and the dip
 * associated with that snode.
 * Assumes the caller holds the appropriate locks
 * to prevent snode and/or dip from going away.
 * Returns:
 *	-1	No associated dip
 *	>= 0	Number of opens.
 */
int
spec_devi_open_count(struct snode *sp, dev_info_t **dipp)
{
	dev_info_t *dip;
	uint_t count;
	struct vnode *vp;

	ASSERT(sp);
	ASSERT(dipp);

	vp = STOV(sp);

	*dipp = NULL;

	/*
	 * We are only interested in common snodes. Only common snodes
	 * get their s_count fields bumped up on opens.
	 */
	if (sp->s_commonvp != vp || (dip = sp->s_dip) == NULL)
		return (-1);

	mutex_enter(&sp->s_lock);
	count = sp->s_count + sp->s_mapcnt;
	if (sp->s_flag & SLOCKED)
		count++;
	mutex_exit(&sp->s_lock);

	*dipp = dip;

	return (count);
}

/*
 * Given a device vnode, return the common
 * vnode associated with it.
 */
struct vnode *
common_specvp(struct vnode *vp)
{
	struct snode *sp;

	if (((vp->v_type != VBLK) && (vp->v_type != VCHR)) ||
	    !vn_matchops(vp, spec_getvnodeops()))
		return (vp);
	sp = VTOS(vp);
	return (sp->s_commonvp);
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.
 * Similar to commonvp() but doesn't acquire the stable_lock, and
 * may use a pre-allocated snode provided by caller.
 */
static struct vnode *
get_cvp(
	dev_t		dev,
	vtype_t		type,
	struct snode	*nsp,		/* pre-allocated snode */
	int		*used_nsp)	/* flag indicating if we use nsp */
{
	struct snode *sp;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		*used_nsp = 1;		/* return value */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
	} else
		*used_nsp = 0;
	return (STOV(sp));
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.  For use ONLY by SPECFS.
 */
struct vnode *
commonvp(dev_t dev, vtype_t type)
{
	struct snode *sp, *nsp;
	struct vnode *svp;

	/* Pre-allocate snode in case we might block */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
	} else {
		mutex_exit(&stable_lock);
		/* Didn't need the pre-allocated snode */
		kmem_cache_free(snode_cache, nsp);
	}
	return (STOV(sp));
}

/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 */
struct snode *stable[STABLESIZE];
int		stablesz = STABLESIZE;
kmutex_t	stable_lock;

/*
 * Put a snode in the table.
 */
static void
sinsert(struct snode *sp)
{
	ASSERT(MUTEX_HELD(&stable_lock));
	sp->s_next = stable[STABLEHASH(sp->s_dev)];
	stable[STABLEHASH(sp->s_dev)] = sp;
}

/*
 * Remove an snode from the hash table.
 * The realvp is not released here because spec_inactive() still
 * needs it to do a spec_fsync().
 */
void
sdelete(struct snode *sp)
{
	struct snode *st;
	struct snode *stprev = NULL;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(sp->s_dev)];
	while (st != NULL) {
		if (st == sp) {
			if (stprev == NULL)
				stable[STABLEHASH(sp->s_dev)] = st->s_next;
			else
				stprev->s_next = st->s_next;
			break;
		}
		stprev = st;
		st = st->s_next;
	}
}

/*
 * Lookup an snode by <dev, type, vp>.
 * ONLY looks for snodes with non-NULL s_realvp members and
 * common snodes (with s_commonvp pointing to its vnode).
 *
 * If vp is NULL, only return commonvp. Otherwise return
 * shadow vp with both shadow and common vp's VN_HELD.
 */
static struct snode *
sfind(
	dev_t	dev,
	vtype_t	type,
	struct vnode *vp)
{
	struct snode *st;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		svp = STOV(st);
		if (st->s_dev == dev && svp->v_type == type &&
		    VN_CMP(st->s_realvp, vp) &&
		    (vp != NULL || st->s_commonvp == svp) &&
		    (vp == NULL || st->s_realvp->v_vfsp == vp->v_vfsp)) {
			VN_HOLD(svp);
			return (st);
		}
		st = st->s_next;
	}
	return (NULL);
}

/*
 * Mark the accessed, updated, or changed times in an snode
 * with the current time.
 */
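/*
 * For example, read paths typically call smark(sp, SACC) and write paths
 * smark(sp, SUPD|SCHG); the unlocked check below avoids taking s_lock
 * when the requested stamps are already current.
 */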
void
smark(struct snode *sp, int flag)
{
	time_t	now = gethrestime_sec();

	/* check for change to avoid unnecessary locking */
	ASSERT((flag & ~(SACC|SUPD|SCHG)) == 0);
	if (((flag & sp->s_flag) != flag) ||
	    ((flag & SACC) && (sp->s_atime != now)) ||
	    ((flag & SUPD) && (sp->s_mtime != now)) ||
	    ((flag & SCHG) && (sp->s_ctime != now))) {
		/* lock and update */
		mutex_enter(&sp->s_lock);
		sp->s_flag |= flag;
		if (flag & SACC)
			sp->s_atime = now;
		if (flag & SUPD)
			sp->s_mtime = now;
		if (flag & SCHG)
			sp->s_ctime = now;
		mutex_exit(&sp->s_lock);
	}
}

/*
 * Return the maximum file offset permitted for this device.
 * -1 means unrestricted.  SLOFFSET is associated with D_64BIT.
 *
 * On a 32-bit kernel this will limit:
 *   o	D_64BIT devices to SPEC_MAXOFFSET_T.
 *   o	non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
 */
offset_t
spec_maxoffset(struct vnode *vp)
{
	struct snode *sp = VTOS(vp);
	struct snode *csp = VTOS(sp->s_commonvp);

	if (STREAMSTAB(getmajor(sp->s_dev)))
		return ((offset_t)-1);
	else if (csp->s_flag & SANYOFFSET)	/* D_U64BIT */
		return ((offset_t)-1);
#ifdef _ILP32
	if (csp->s_flag & SLOFFSET)		/* D_64BIT */
		return (SPEC_MAXOFFSET_T);
#endif	/* _ILP32 */
	return (MAXOFF_T);
}

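/*
 * kmem cache constructor and destructor for snodes.  The constructor
 * allocates the embedded vnode, points it at the specfs vnode operations,
 * and initializes the per-snode lock and condition variable, so cached
 * snodes can be handed out without redoing this setup on every allocation.
 */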
/*ARGSUSED*/
static int
snode_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct snode *sp = buf;
	struct vnode *vp;

	vp = vn_alloc(KM_SLEEP);

	sp->s_vnode = vp;

	vn_setops(vp, spec_getvnodeops());
	vp->v_data = (caddr_t)sp;

	mutex_init(&sp->s_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sp->s_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
snode_destructor(void *buf, void *cdrarg)
{
	struct snode *sp = buf;
	struct vnode *vp = STOV(sp);

	mutex_destroy(&sp->s_lock);
	cv_destroy(&sp->s_cv);

	vn_free(vp);
}

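/*
 * One-time specfs initialization, performed when the fstype is registered:
 * install the vfs and vnode operation templates, set up the global locks,
 * create the snode cache, and establish spec_vfs and the specdev fsid used
 * by snodes that have no real vnode to inherit attributes from.
 */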
int
specinit(int fstype, char *name)
{
	static const fs_operation_def_t spec_vfsops_template[] = {
		VFSNAME_SYNC, (fs_generic_func_p) spec_sync,
		NULL, NULL
	};
	extern struct vnodeops *spec_vnodeops;
	extern const fs_operation_def_t spec_vnodeops_template[];
	struct vfsops *spec_vfsops;
	int error;
	dev_t dev;

	/*
	 * Associate vfs and vnode operations.
	 */
	error = vfs_setfsops(fstype, spec_vfsops_template, &spec_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "specinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, spec_vnodeops_template, &spec_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "specinit: bad vnode ops template");
		return (error);
	}

	mutex_init(&stable_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spec_syncbusy, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Create snode cache.
	 */
	snode_cache = kmem_cache_create("snode_cache", sizeof (struct snode),
	    0, snode_constructor, snode_destructor, NULL, NULL, NULL, 0);

	/*
	 * Associate vfs operations with spec_vfs.
	 */
	VFS_INIT(&spec_vfs, spec_vfsops, (caddr_t)NULL);
	if ((dev = getudev()) == -1)
		dev = 0;
	specdev = makedevice(dev, 0);
	return (0);
}

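/*
 * Invoke the driver's close entry point for the last close of a device.
 * STREAMS character devices are closed via strclose(); block devices flush
 * and invalidate any cached buffers before dev_close() so that removable
 * media can be changed safely.
 */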
int
device_close(struct vnode *vp, int flag, struct cred *cr)
{
	struct snode *sp = VTOS(vp);
	enum vtype type = vp->v_type;
	struct vnode *cvp;
	dev_t dev;
	int error = 0;

	dev = sp->s_dev;
	cvp = sp->s_commonvp;

	switch (type) {

	case VCHR:
		if (STREAMSTAB(getmajor(dev))) {
			/* nothing to close if the stream is already gone */
			if (cvp->v_stream != NULL)
				error = strclose(cvp, flag, cr);
			vp->v_stream = NULL;
		} else
			error = dev_close(dev, flag, OTYP_CHR, cr);
		break;

	case VBLK:
		/*
		 * On last close of a block device we must
		 * invalidate any in-core blocks so that we
		 * can, for example, change floppy disks.
		 */
		(void) spec_putpage(cvp, (offset_t)0,
		    (size_t)0, B_INVAL|B_FORCE, cr);
		bflush(dev);
		binval(dev);
		error = dev_close(dev, flag, OTYP_BLK, cr);
		break;
	default:
		panic("device_close: not a device");
		/*NOTREACHED*/
	}

	return (error);
}

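/*
 * Create a VCHR specfs vnode for use as a controlling terminal, based on
 * an existing device vnode.  The common snode's open count is bumped to
 * account for the new controlling-terminal reference.
 */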
struct vnode *
makectty(vnode_t *ovp)
{
	vnode_t *vp;

	if ((vp = makespecvp(ovp->v_rdev, VCHR)) != NULL) {
		struct snode *sp;
		struct snode *csp;
		struct vnode *cvp;

		sp = VTOS(vp);
		cvp = sp->s_commonvp;
		csp = VTOS(cvp);
		mutex_enter(&csp->s_lock);
		csp->s_count++;
		mutex_exit(&csp->s_lock);
	}

	return (vp);
}

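/*
 * Walk every snode in the stable hash, applying the callback to each one
 * until it returns something other than DDI_WALK_CONTINUE.  The walk runs
 * with stable_lock held, so callbacks must be brief and must not re-enter
 * paths that take stable_lock.
 */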
void
spec_snode_walk(int (*callback)(struct snode *sp, void *arg), void *arg)
{
	struct snode	*sp;
	int		i;

	ASSERT(callback);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp; sp = sp->s_next) {
			if (callback(sp, arg) != DDI_WALK_CONTINUE)
				goto out;
		}
	}
out:
	mutex_exit(&stable_lock);
}

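/*
 * Return 1 if this is a specfs vnode that was created through the clone
 * device (SCLONE set in the snode), 0 otherwise.
 */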
int
spec_is_clone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SCLONE) ? 1 : 0);
	}

	return (0);
}

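/*
 * Return 1 if this is a specfs vnode for a self-cloning device
 * (SSELFCLONE set in the snode), 0 otherwise.
 */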
int
spec_is_selfclone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SSELFCLONE) ? 1 : 0);
	}

	return (0);
}
891