/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved  	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/fs/fifonode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/autoconf.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>

struct vfs spec_vfs;
static dev_t specdev;
struct kmem_cache *snode_cache;

static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);

struct vnode *
specvp_devfs(
	struct vnode	*realvp,
	dev_t		dev,
	vtype_t		vtyp,
	struct cred	*cr,
	dev_info_t	*dip)
{
	struct vnode	*vp;

	ASSERT(realvp && dip);
	vp = specvp(realvp, dev, vtyp, cr);
	ASSERT(vp);

	/* associate a dip hold with the common snode's s_dip pointer */
	spec_assoc_vp_with_devi(vp, dip);
	return (vp);
}

/*
 * Return a shadow special vnode for the given dev.
 * If no snode exists for this dev, create one and put it
 * in a table hashed by <dev, realvp>.  If the snode for
 * this dev is already in the table, return it (its reference
 * count is incremented by sfind).  The snode will be flushed
 * from the table when spec_inactive() calls sdelete().
 *
 * The fsid is inherited from the real vnode so that clones
 * can be found.
 */
struct vnode *
specvp(
	struct vnode	*vp,
	dev_t		dev,
	vtype_t		type,
	struct cred	*cr)
{
	struct snode *sp;
	struct snode *nsp;
	struct snode *csp;
	struct vnode *svp;
	struct vattr va;
	int	rc;
	int	used_csp = 0;		/* Did we use pre-allocated csp */

	if (vp == NULL)
		return (NULL);
	if (vp->v_type == VFIFO)
		return (fifovp(vp, cr));

	ASSERT(vp->v_type == type);
	ASSERT(vp->v_rdev == dev);

	/*
	 * Pre-allocate snodes before holding any locks in case we block
	 */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	csp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	/*
	 * Get the time attributes outside of the stable lock since
	 * this operation may block. Unfortunately, it may not have
	 * been required if the snode is in the cache.
	 */
	va.va_mask = AT_FSID | AT_TIMES;
	rc = VOP_GETATTR(vp, &va, 0, cr);	/* XXX may block! */

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, vp)) == NULL) {
		struct vnode *cvp;

		sp = nsp;	/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp	= vp;
		VN_HOLD(vp);
		sp->s_commonvp	= NULL;
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= 0;
		sp->s_flag	= 0;
		if (rc == 0) {
			/*
			 * Set times in snode to those in the vnode.
			 */
			sp->s_fsid = va.va_fsid;
			sp->s_atime = va.va_atime.tv_sec;
			sp->s_mtime = va.va_mtime.tv_sec;
			sp->s_ctime = va.va_ctime.tv_sec;
		} else {
			sp->s_fsid = specdev;
			sp->s_atime = 0;
			sp->s_mtime = 0;
			sp->s_ctime = 0;
		}
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_flag	= (vp->v_flag & VROOT);
		svp->v_vfsp	= vp->v_vfsp;
		VFS_HOLD(svp->v_vfsp);
		svp->v_type	= type;
		svp->v_rdev	= dev;
		(void) vn_copypath(vp, svp);
		if (type == VBLK || type == VCHR) {
			cvp = get_cvp(dev, type, csp, &used_csp);
			svp->v_stream = cvp->v_stream;

			sp->s_commonvp = cvp;
		}
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
		if (used_csp == 0) {
			/* Didn't use pre-allocated snode so free it */
			kmem_cache_free(snode_cache, csp);
		}
	} else {
		mutex_exit(&stable_lock);
		/* free unused snode memory */
		kmem_cache_free(snode_cache, nsp);
		kmem_cache_free(snode_cache, csp);
	}
	return (STOV(sp));
}

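/*
 * Example (hypothetical sketch, not an interface of this file): a
 * filesystem lookup routine that encounters a device special file
 * would typically substitute a specvp() shadow for its own vnode,
 * along these lines:
 *
 *	if (vp->v_type == VBLK || vp->v_type == VCHR) {
 *		struct vnode *newvp;
 *
 *		newvp = specvp(vp, vp->v_rdev, vp->v_type, cr);
 *		VN_RELE(vp);	(specvp() took its own hold on vp)
 *		if (newvp == NULL)
 *			return (ENOSYS);
 *		vp = newvp;
 *	}
 */
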
/*
 * Return a special vnode for the given dev; no vnode is supplied
 * for it to shadow.  Always create a new snode and put it in the
 * table hashed by <dev, NULL>.  The snode will be flushed from the
 * table when spec_inactive() calls sdelete().  The association of
 * this node with an attached instance of hardware is not made until
 * spec_open time.
 *
 * N.B. Assumes caller takes on responsibility of making sure no one
 * else is creating a snode for (dev, type) at this time.
 */
struct vnode *
makespecvp(dev_t dev, vtype_t type)
{
	struct snode *sp;
	struct vnode *svp, *cvp;
	time_t now;

	sp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	svp = STOV(sp);
	cvp = commonvp(dev, type);
	now = gethrestime_sec();

	sp->s_realvp	= NULL;
	sp->s_commonvp	= cvp;
	sp->s_dev	= dev;
	sp->s_dip	= NULL;
	sp->s_nextr	= NULL;
	sp->s_list	= NULL;
	sp->s_plcy	= NULL;
	sp->s_size	= 0;
	sp->s_flag	= 0;
	sp->s_fsid	= specdev;
	sp->s_atime	= now;
	sp->s_mtime	= now;
	sp->s_ctime	= now;
	sp->s_count	= 0;
	sp->s_mapcnt	= 0;

	vn_reinit(svp);
	svp->v_vfsp	= &spec_vfs;
	svp->v_stream	= cvp->v_stream;
	svp->v_type	= type;
	svp->v_rdev	= dev;

	vn_exists(svp);
	mutex_enter(&stable_lock);
	sinsert(sp);
	mutex_exit(&stable_lock);

	return (svp);
}

/*
 * Associate the common snode with a devinfo node.  This is called from:
 *
 *   1) specvp_devfs to associate a specfs node with the dip attached
 *	by devfs.
 *
 *   2) spec_open after path reconstruction and attach.
 *
 *   3) From dacf processing to associate a makespecvp node with
 *	the dip that dacf postattach processing is being performed on.
 *	This association is made prior to open to avoid recursion issues.
 *
 *   4) From ddi_assoc_queue_with_devi to change vnode association as part of
 *	DL_ATTACH/DL_DETACH processing (SDIPSET already set).  The call
 *	from ddi_assoc_queue_with_devi may specify a NULL dip.
 *
 * We put an extra hold on the devinfo node passed in as we establish it as
 * the new s_dip pointer.  Any hold associated with the prior s_dip pointer
 * is released. The new hold will stay active until another call to
 * spec_assoc_vp_with_devi or until the common snode is destroyed by
 * spec_inactive after the last VN_RELE of the common node. This devinfo hold
 * transfers across a clone open except in the clone_dev case, where the clone
 * driver is no longer required after open.
 *
 * When SDIPSET is set and s_dip is NULL, the vnode has an association with
 * the driver even though there is currently no association with a specific
 * hardware instance.
 */
void
spec_assoc_vp_with_devi(struct vnode *vp, dev_info_t *dip)
{
	struct snode	*csp;
	dev_info_t	*olddip;

	ASSERT(vp);

	/*
	 * Don't establish a NULL association for a vnode associated with the
	 * clone driver.  The qassociate(, -1) call from a streams driver's
	 * open implementation to indicate support for qassociate has the
	 * side-effect of this type of spec_assoc_vp_with_devi call. This
	 * call should not change the association of the pre-clone
	 * vnode associated with the clone driver; the post-clone newdev
	 * association will be established later by spec_clone().
	 */
	if ((dip == NULL) && (getmajor(vp->v_rdev) == clone_major))
		return;

	/* hold the new */
	if (dip)
		e_ddi_hold_devi(dip);

	csp = VTOS(VTOS(vp)->s_commonvp);
	mutex_enter(&csp->s_lock);
	olddip = csp->s_dip;
	csp->s_dip = dip;
	csp->s_flag |= SDIPSET;

	/* If association changes then invalidate cached size */
	if (olddip != dip)
		csp->s_flag &= ~SSIZEVALID;
	mutex_exit(&csp->s_lock);

	/* release the old */
	if (olddip)
		ddi_release_devi(olddip);
}

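/*
 * Example (hypothetical sketch): dacf postattach code that wants a
 * specfs vnode bound to the instance it just attached (case 3 in the
 * comment above) could combine makespecvp() with this routine before
 * any open:
 *
 *	struct vnode *vp;
 *
 *	vp = makespecvp(dev, VCHR);
 *	spec_assoc_vp_with_devi(vp, dip);
 *
 * After this, the common snode holds dip until the association is
 * changed or the common snode is destroyed.
 */
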
/*
 * Return the held dip associated with the specified snode.
 */
dev_info_t *
spec_hold_devi_by_vp(struct vnode *vp)
{
	struct snode	*csp;
	dev_info_t	*dip;

	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	csp = VTOS(VTOS(vp)->s_commonvp);
	dip = csp->s_dip;
	if (dip)
		e_ddi_hold_devi(dip);
	return (dip);
}

/*
 * Find a special vnode that refers to the given device
 * of the given type.  Never return a "common" vnode.
 * Return NULL if a special vnode does not exist.
 * HOLD the vnode before returning it.
 */
struct vnode *
specfind(dev_t dev, vtype_t type)
{
	struct snode *st;
	struct vnode *nvp;

	mutex_enter(&stable_lock);
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		if (st->s_dev == dev) {
			nvp = STOV(st);
			if (nvp->v_type == type && st->s_commonvp != nvp) {
				VN_HOLD(nvp);
				mutex_exit(&stable_lock);
				return (nvp);
			}
		}
		st = st->s_next;
	}
	mutex_exit(&stable_lock);
	return (NULL);
}

/*
 * Loop through the snode cache looking for snodes referencing dip.
 *
 * This function determines if a devinfo node is "BUSY" from the perspective
 * of having an active vnode associated with the device, which represents a
 * dependency on the device's services.  This function is needed because a
 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
 * for instance, the framework is manipulating the node (has an open
 * ndi_hold_devi).
 *
 * Returns:
 *	DEVI_REFERENCED		- if dip is referenced
 *	DEVI_NOT_REFERENCED	- if dip is not referenced
 */
int
devi_stillreferenced(dev_info_t *dip)
{
	struct snode	*sp;
	int		i;

	/* if no hold then there can't be an snode with s_dip == dip */
	if (e_ddi_devi_holdcnt(dip) == 0)
		return (DEVI_NOT_REFERENCED);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			if (sp->s_dip == dip) {
				mutex_exit(&stable_lock);
				return (DEVI_REFERENCED);
			}
		}
	}
	mutex_exit(&stable_lock);
	return (DEVI_NOT_REFERENCED);
}

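/*
 * Example (hypothetical sketch): detach-time framework code could use
 * this to refuse to tear down a node that specfs still depends on:
 *
 *	if (devi_stillreferenced(dip) == DEVI_REFERENCED)
 *		return (DDI_FAILURE);	(an active snode references dip)
 */
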
/*
 * Given an snode, returns the open count and the dip
 * associated with that snode.
 * Assumes the caller holds the appropriate locks
 * to prevent snode and/or dip from going away.
 * Returns:
 *	-1	No associated dip
 *	>= 0	Number of opens.
 */
int
spec_devi_open_count(struct snode *sp, dev_info_t **dipp)
{
	dev_info_t *dip;
	uint_t count;
	struct vnode *vp;

	ASSERT(sp);
	ASSERT(dipp);

	vp = STOV(sp);

	*dipp = NULL;

	/*
	 * We are only interested in common snodes. Only common snodes
	 * get their s_count fields bumped up on opens.
	 */
	if (sp->s_commonvp != vp || (dip = sp->s_dip) == NULL)
		return (-1);

	mutex_enter(&sp->s_lock);
	count = sp->s_count + sp->s_mapcnt;
	if (sp->s_flag & SLOCKED)
		count++;
	mutex_exit(&sp->s_lock);

	*dipp = dip;

	return (count);
}

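/*
 * Example (hypothetical sketch): a caller walking the stable (see
 * spec_snode_walk() below) could report per-device open counts:
 *
 *	dev_info_t *dip;
 *	int opens;
 *
 *	opens = spec_devi_open_count(sp, &dip);
 *	if (opens > 0)
 *		cmn_err(CE_CONT, "%s%d: %d opens\n",
 *		    ddi_driver_name(dip), ddi_get_instance(dip), opens);
 */
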
/*
 * Given a device vnode, return the common
 * vnode associated with it.
 */
struct vnode *
common_specvp(struct vnode *vp)
{
	struct snode *sp;

	if (((vp->v_type != VBLK) && (vp->v_type != VCHR)) ||
	    !vn_matchops(vp, spec_getvnodeops()))
		return (vp);
	sp = VTOS(vp);
	return (sp->s_commonvp);
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.
 * Similar to commonvp() but doesn't acquire the stable_lock, and
 * may use a pre-allocated snode provided by caller.
 */
static struct vnode *
get_cvp(
	dev_t		dev,
	vtype_t		type,
	struct snode	*nsp,		/* pre-allocated snode */
	int		*used_nsp)	/* flag indicating if we use nsp */
{
	struct snode *sp;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		*used_nsp = 1;		/* return value */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
	} else
		*used_nsp = 0;
	return (STOV(sp));
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.  For use ONLY by SPECFS.
 */
struct vnode *
commonvp(dev_t dev, vtype_t type)
{
	struct snode *sp, *nsp;
	struct vnode *svp;

	/* Pre-allocate snode in case we might block */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-alloced snode */
		svp = STOV(sp);

		sp->s_realvp	= NULL;
		sp->s_commonvp	= svp;		/* points to itself */
		sp->s_dev	= dev;
		sp->s_dip	= NULL;
		sp->s_nextr	= NULL;
		sp->s_list	= NULL;
		sp->s_plcy	= NULL;
		sp->s_size	= UNKNOWN_SIZE;
		sp->s_flag	= 0;
		sp->s_fsid	= specdev;
		sp->s_atime	= 0;
		sp->s_mtime	= 0;
		sp->s_ctime	= 0;
		sp->s_count	= 0;
		sp->s_mapcnt	= 0;

		vn_reinit(svp);
		svp->v_vfsp	= &spec_vfs;
		svp->v_type	= type;
		svp->v_rdev	= dev;
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
	} else {
		mutex_exit(&stable_lock);
		/* Didn't need the pre-allocated snode */
		kmem_cache_free(snode_cache, nsp);
	}
	return (STOV(sp));
}

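/*
 * Example (hypothetical sketch): specfs-internal code that needs the
 * per-device vnode regardless of which shadow vnode a caller opened
 * can ask for the common vnode directly:
 *
 *	struct vnode *cvp = commonvp(dev, VCHR);
 *	struct snode *csp = VTOS(cvp);
 *
 * All opens of dev are accounted against csp, so csp->s_count reflects
 * the device-wide open count.
 */
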
/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 */
struct snode *stable[STABLESIZE];
int		stablesz = STABLESIZE;
kmutex_t	stable_lock;

/*
 * Put a snode in the table.
 */
static void
sinsert(struct snode *sp)
{
	ASSERT(MUTEX_HELD(&stable_lock));
	sp->s_next = stable[STABLEHASH(sp->s_dev)];
	stable[STABLEHASH(sp->s_dev)] = sp;
}

/*
 * Remove an snode from the hash table.
 * The realvp is not released here because spec_inactive() still
 * needs it to do a spec_fsync().
 */
void
sdelete(struct snode *sp)
{
	struct snode *st;
	struct snode *stprev = NULL;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(sp->s_dev)];
	while (st != NULL) {
		if (st == sp) {
			if (stprev == NULL)
				stable[STABLEHASH(sp->s_dev)] = st->s_next;
			else
				stprev->s_next = st->s_next;
			break;
		}
		stprev = st;
		st = st->s_next;
	}
}

/*
 * Lookup an snode by <dev, type, vp>.
 * ONLY looks for snodes with non-NULL s_realvp members and
 * common snodes (with s_commonvp pointing to its vnode).
 *
 * If vp is NULL, only return commonvp. Otherwise return
 * shadow vp with both shadow and common vp's VN_HELD.
 */
static struct snode *
sfind(
	dev_t	dev,
	vtype_t	type,
	struct vnode *vp)
{
	struct snode *st;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		svp = STOV(st);
		if (st->s_dev == dev && svp->v_type == type &&
		    VN_CMP(st->s_realvp, vp) &&
		    (vp != NULL || st->s_commonvp == svp)) {
			VN_HOLD(svp);
			return (st);
		}
		st = st->s_next;
	}
	return (NULL);
}

/*
 * Mark the accessed, updated, or changed times in an snode
 * with the current time.
 */
void
smark(struct snode *sp, int flag)
{
	time_t	now = gethrestime_sec();

	/* check for change to avoid unnecessary locking */
	ASSERT((flag & ~(SACC|SUPD|SCHG)) == 0);
	if (((flag & sp->s_flag) != flag) ||
	    ((flag & SACC) && (sp->s_atime != now)) ||
	    ((flag & SUPD) && (sp->s_mtime != now)) ||
	    ((flag & SCHG) && (sp->s_ctime != now))) {
		/* lock and update */
		mutex_enter(&sp->s_lock);
		sp->s_flag |= flag;
		if (flag & SACC)
			sp->s_atime = now;
		if (flag & SUPD)
			sp->s_mtime = now;
		if (flag & SCHG)
			sp->s_ctime = now;
		mutex_exit(&sp->s_lock);
	}
}

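/*
 * Example (hypothetical sketch): read/write paths typically mark the
 * relevant times after a successful transfer, e.g.:
 *
 *	smark(sp, SACC);		(after a read)
 *	smark(sp, SUPD|SCHG);		(after a write)
 *
 * The unlocked pre-check above makes these calls cheap in the common
 * case where the times are already current within the same second.
 */
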
/*
 * Return the maximum file offset permitted for this device.
 * -1 means unrestricted.  SLOFFSET is associated with D_64BIT.
 *
 * On a 32-bit kernel this will limit:
 *   o	D_64BIT devices to SPEC_MAXOFFSET_T.
 *   o	non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
 */
offset_t
spec_maxoffset(struct vnode *vp)
{
	struct snode *sp = VTOS(vp);
	struct snode *csp = VTOS(sp->s_commonvp);

	if (STREAMSTAB(getmajor(sp->s_dev)))
		return ((offset_t)-1);
	else if (csp->s_flag & SANYOFFSET)	/* D_U64BIT */
		return ((offset_t)-1);
#ifdef _ILP32
	if (csp->s_flag & SLOFFSET)		/* D_64BIT */
		return (SPEC_MAXOFFSET_T);
#endif	/* _ILP32 */
	return (MAXOFF_T);
}

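/*
 * Example (hypothetical sketch): an I/O path could reject offsets the
 * underlying driver cannot address before calling into the driver,
 * remembering that -1 means unrestricted:
 *
 *	offset_t maxoff = spec_maxoffset(vp);
 *
 *	if (maxoff != (offset_t)-1 && uiop->uio_loffset > maxoff)
 *		return (EINVAL);
 */
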
/*ARGSUSED*/
static int
snode_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct snode *sp = buf;
	struct vnode *vp;

	vp = vn_alloc(KM_SLEEP);

	sp->s_vnode = vp;

	vn_setops(vp, spec_getvnodeops());
	vp->v_data = (caddr_t)sp;

	mutex_init(&sp->s_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sp->s_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
snode_destructor(void *buf, void *cdrarg)
{
	struct snode *sp = buf;
	struct vnode *vp = STOV(sp);

	mutex_destroy(&sp->s_lock);
	cv_destroy(&sp->s_cv);

	vn_free(vp);
}


int
specinit(int fstype, char *name)
{
	static const fs_operation_def_t spec_vfsops_template[] = {
		VFSNAME_SYNC, (fs_generic_func_p) spec_sync,
		NULL, NULL
	};
	extern struct vnodeops *spec_vnodeops;
	extern const fs_operation_def_t spec_vnodeops_template[];
	struct vfsops *spec_vfsops;
	int error;
	dev_t dev;

	/*
	 * Associate vfs and vnode operations.
	 */
	error = vfs_setfsops(fstype, spec_vfsops_template, &spec_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "specinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, spec_vnodeops_template, &spec_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "specinit: bad vnode ops template");
		return (error);
	}

	mutex_init(&stable_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spec_syncbusy, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Create snode cache
	 */
	snode_cache = kmem_cache_create("snode_cache", sizeof (struct snode),
		0, snode_constructor, snode_destructor, NULL, NULL, NULL, 0);

	/*
	 * Associate vfs operations with spec_vfs
	 */
	VFS_INIT(&spec_vfs, spec_vfsops, (caddr_t)NULL);
	if ((dev = getudev()) == -1)
		dev = 0;
	specdev = makedevice(dev, 0);
	return (0);
}

int
device_close(struct vnode *vp, int flag, struct cred *cr)
{
	struct snode *sp = VTOS(vp);
	enum vtype type = vp->v_type;
	struct vnode *cvp;
	dev_t dev;
	int error = 0;	/* streams case: stream may already be closed */

	dev = sp->s_dev;
	cvp = sp->s_commonvp;

	switch (type) {

	case VCHR:
		if (STREAMSTAB(getmajor(dev))) {
			if (cvp->v_stream != NULL)
				error = strclose(cvp, flag, cr);
			vp->v_stream = NULL;
		} else
			error = dev_close(dev, flag, OTYP_CHR, cr);
		break;

	case VBLK:
		/*
		 * On last close of a block device we must
		 * invalidate any in-core blocks so that we
		 * can, for example, change floppy disks.
		 */
		(void) spec_putpage(cvp, (offset_t)0,
		    (size_t)0, B_INVAL|B_FORCE, cr);
		bflush(dev);
		binval(dev);
		error = dev_close(dev, flag, OTYP_BLK, cr);
		break;
	default:
		panic("device_close: not a device");
		/*NOTREACHED*/
	}

	return (error);
}

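/*
 * Return a VCHR shadow vnode for the device underlying ovp (used when
 * establishing a controlling terminal).  The open count of the common
 * snode is bumped so the device remains accounted as open.
 */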
struct vnode *
makectty(vnode_t *ovp)
{
	vnode_t *vp;

	if ((vp = makespecvp(ovp->v_rdev, VCHR)) != NULL) {
		struct snode *sp;
		struct snode *csp;
		struct vnode *cvp;

		sp = VTOS(vp);
		cvp = sp->s_commonvp;
		csp = VTOS(cvp);
		mutex_enter(&csp->s_lock);
		csp->s_count++;
		mutex_exit(&csp->s_lock);
	}

	return (vp);
}

void
spec_snode_walk(int (*callback)(struct snode *sp, void *arg), void *arg)
{
	struct snode	*sp;
	int		i;

	ASSERT(callback);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp; sp = sp->s_next) {
			if (callback(sp, arg) != DDI_WALK_CONTINUE)
				goto out;
		}
	}
out:
	mutex_exit(&stable_lock);
}

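/*
 * Example (hypothetical sketch): counting the snodes currently bound
 * to a given dip with spec_snode_walk():
 *
 *	struct walkarg { dev_info_t *dip; int count; };
 *
 *	static int
 *	count_snodes(struct snode *sp, void *arg)
 *	{
 *		struct walkarg *wp = arg;
 *
 *		if (sp->s_dip == wp->dip)
 *			wp->count++;
 *		return (DDI_WALK_CONTINUE);
 *	}
 *
 * The callback runs with stable_lock held, so it must not block or
 * re-enter the snode table.
 */
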
int
spec_is_clone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SCLONE) ? 1 : 0);
	}

	return (0);
}

int
spec_is_selfclone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SSELFCLONE) ? 1 : 0);
	}

	return (0);
}
891