/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* Portions Copyright 2007 Shivakumar GN */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dirent.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/vfs.h>
#include <sys/vnode.h>

#include <vm/as.h>
#include <vm/seg_vn.h>

#include <sys/gfs.h>

/*
 * Generic pseudo-filesystem routines.
 *
 * There are significant similarities between the implementation of certain file
 * system entry points across different filesystems.  While one could attempt to
 * "choke up on the bat" and incorporate common functionality into a VOP
 * preamble or postamble, such an approach is limited in the benefit it can
 * provide.  In this file we instead define a toolkit of routines which can be
 * called from a filesystem (with in-kernel pseudo-filesystems being the focus
 * of the exercise) in a more component-like fashion.
 *
 * There are three basic classes of routines:
 *
 * 1) Low-level support routines
 *
 *    These routines are designed to play a support role for existing
 *    pseudo-filesystems (such as procfs).  They simplify common tasks,
 *    without forcing the filesystem to hand over management to GFS.  The
 *    routines covered are:
 *
 *	gfs_readdir_init()
 *	gfs_readdir_emit()
 *	gfs_readdir_emitn()
 *	gfs_readdir_pred()
 *	gfs_readdir_fini()
 *	gfs_lookup_dot()
 *
 * 2) Complete GFS management
 *
 *    These routines take a more active role in management of the
 *    pseudo-filesystem.  They handle the relationship between vnode private
 *    data and VFS data, as well as the relationship between vnodes in the
 *    directory hierarchy.
 *
 *    In order to use these interfaces, the first member of every private
 *    v_data must be a gfs_file_t or a gfs_dir_t.  This hands over all control
 *    to GFS; a sketch of such a structure follows this comment.
 *
 * 	gfs_file_create()
 * 	gfs_dir_create()
 * 	gfs_root_create()
 *
 *	gfs_file_inactive()
 *	gfs_dir_inactive()
 *	gfs_dir_lookup()
 *	gfs_dir_readdir()
 *
 * 	gfs_vop_inactive()
 * 	gfs_vop_lookup()
 * 	gfs_vop_readdir()
 * 	gfs_vop_map()
 *
 * 3) Single File pseudo-filesystems
 *
 *    This routine creates a rooted file to be overlaid on top of another
 *    file in the physical filespace.
 *
 *    Note that the parent is NULL (actually the vfs), but there is nothing
 *    technically keeping such a file from utilizing the "Complete GFS
 *    management" set of routines.
 *
 * 	gfs_root_create_file()
 */
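
/*
 * As an illustrative sketch (the 'mypseudo' names below are hypothetical,
 * not part of GFS), a consumer of the "Complete GFS management" routines
 * might lay out its private data like this, with the gfs_dir_t (or
 * gfs_file_t, for plain files) required to be the first member so that
 * GFS may treat v_data as its own:
 *
 *	typedef struct mypseudo_node {
 *		gfs_dir_t	mn_gfs;		(must be the first member)
 *		uint_t		mn_id;		(consumer-specific state)
 *	} mypseudo_node_t;
 *
 * The size of this structure is then passed to gfs_dir_create() or
 * gfs_file_create() below, which allocate and zero it.
 */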

/*
 * gfs_make_opsvec: take an array of vnode type definitions and create
 * their vnodeops_t structures
 *
 * This routine takes an array of gfs_opsvec_t's.  It could
 * alternatively take an array of gfs_opsvec_t*'s, which would allow
 * vnode types to be completely defined in files external to the caller
 * of gfs_make_opsvec().  As it stands, much more sharing takes place --
 * both the caller and the vnode type provider need to access gfsv_ops
 * and gfsv_template, and the caller also needs to know gfsv_name.
 */
int
gfs_make_opsvec(gfs_opsvec_t *vec)
{
	int error, i;

	for (i = 0; ; i++) {
		if (vec[i].gfsv_name == NULL)
			return (0);
		error = vn_make_ops(vec[i].gfsv_name, vec[i].gfsv_template,
		    vec[i].gfsv_ops);
		if (error)
			break;
	}

	cmn_err(CE_WARN, "gfs_make_opsvec: bad vnode ops template for '%s'",
	    vec[i].gfsv_name);
	for (i--; i >= 0; i--) {
		vn_freevnodeops(*vec[i].gfsv_ops);
		*vec[i].gfsv_ops = NULL;
	}
	return (error);
}
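
/*
 * As a hedged example (all 'mypseudo' names are hypothetical), a caller
 * might define a vnode type and build its vnodeops_t like this; the exact
 * fs_operation_def_t initializer style varies by release:
 *
 *	static vnodeops_t *mypseudo_dir_ops;
 *
 *	static const fs_operation_def_t mypseudo_dir_tmpl[] = {
 *		{ VOPNAME_LOOKUP,	{ .vop_lookup = gfs_vop_lookup } },
 *		{ VOPNAME_READDIR,	{ .vop_readdir = gfs_vop_readdir } },
 *		{ VOPNAME_INACTIVE,	{ .vop_inactive = gfs_vop_inactive } },
 *		{ NULL, NULL }
 *	};
 *
 *	static gfs_opsvec_t mypseudo_opsvec[] = {
 *		{ "mypseudo directory", mypseudo_dir_tmpl, &mypseudo_dir_ops },
 *		{ NULL }
 *	};
 *
 *	if ((error = gfs_make_opsvec(mypseudo_opsvec)) != 0)
 *		return (error);
 *
 * On failure, gfs_make_opsvec() frees any vnodeops_t structures it already
 * created, so the caller need not unwind.
 */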

/*
 * Low level directory routines
 *
 * These routines provide some simple abstractions for reading directories.
 * They are designed to be used by existing pseudo filesystems (namely procfs)
 * that already have a complicated management infrastructure.
 */

/*
 * gfs_readdir_init: initiate a generic readdir
 *   st		- a pointer to an uninitialized gfs_readdir_state_t structure
 *   name_max	- the directory's maximum file name length
 *   ureclen	- the exported file-space record length (1 for non-legacy FSs)
 *   uiop	- the uiop passed to readdir
 *   parent	- the parent directory's inode
 *   self	- this directory's inode
 *
 * Returns 0 or a non-zero errno.
 *
 * Typical VOP_READDIR usage of gfs_readdir_*:
 *
 *	if ((error = gfs_readdir_init(...)) != 0)
 *		return (error);
 *	eof = 0;
 *	while ((error = gfs_readdir_pred(..., &voffset)) == 0) {
 *		if (!consumer_entry_at(voffset))
 *			voffset = consumer_next_entry(voffset);
 *		if (consumer_eof(voffset)) {
 *			eof = 1;
 *			break;
 *		}
 *		if ((error = gfs_readdir_emit(..., voffset,
 *		    consumer_ino(voffset), consumer_name(voffset))) != 0)
 *			break;
 *	}
 *	return (gfs_readdir_fini(..., error, eofp, eof));
 *
 * As you can see, a zero result from gfs_readdir_pred() or
 * gfs_readdir_emit() indicates that processing should continue,
 * whereas a non-zero result indicates that the loop should terminate.
 * Most consumers need do nothing more than let gfs_readdir_fini()
 * determine what the cause of failure was and return the appropriate
 * value.
 */
int
gfs_readdir_init(gfs_readdir_state_t *st, int name_max, int ureclen,
    uio_t *uiop, ino64_t parent, ino64_t self)
{
	if (uiop->uio_loffset < 0 || uiop->uio_resid <= 0 ||
	    (uiop->uio_loffset % ureclen) != 0)
		return (EINVAL);

	st->grd_ureclen = ureclen;
	st->grd_oresid = uiop->uio_resid;
	st->grd_namlen = name_max;
	st->grd_dirent = kmem_zalloc(DIRENT64_RECLEN(st->grd_namlen), KM_SLEEP);
	st->grd_parent = parent;
	st->grd_self = self;

	return (0);
}

/*
 * gfs_readdir_emit_int: internal routine to emit directory entry
 *
 *   st		- the current readdir state, which must have d_ino and d_name
 *                set
 *   uiop	- caller-supplied uio pointer
 *   next	- the offset of the next entry
 */
static int
gfs_readdir_emit_int(gfs_readdir_state_t *st, uio_t *uiop, offset_t next)
{
	int reclen;

	reclen = DIRENT64_RECLEN(strlen(st->grd_dirent->d_name));

	if (reclen > uiop->uio_resid) {
		/*
		 * Error if no entries were returned yet
		 */
		if (uiop->uio_resid == st->grd_oresid)
			return (EINVAL);
		return (-1);
	}

	st->grd_dirent->d_off = next;
	st->grd_dirent->d_reclen = (ushort_t)reclen;

	if (uiomove((caddr_t)st->grd_dirent, reclen, UIO_READ, uiop))
		return (EFAULT);

	uiop->uio_loffset = next;

	return (0);
}

/*
 * gfs_readdir_emit: emit a directory entry
 *   voff       - the virtual offset (obtained from gfs_readdir_pred)
 *   ino        - the entry's inode
 *   name       - the entry's name
 *
 * Returns 0 on success, a non-zero errno on failure, or -1 if the
 * readdir loop should terminate.  A non-zero result (either errno or
 * -1) from this function is typically passed directly to
 * gfs_readdir_fini().
 */
int
gfs_readdir_emit(gfs_readdir_state_t *st, uio_t *uiop, offset_t voff,
    ino64_t ino, const char *name)
{
	offset_t off = (voff + 2) * st->grd_ureclen;

	st->grd_dirent->d_ino = ino;
	(void) strncpy(st->grd_dirent->d_name, name, st->grd_namlen);

	/*
	 * Inter-entry offsets are invalid, so we assume a record size of
	 * grd_ureclen and explicitly set the offset appropriately.
	 */
	return (gfs_readdir_emit_int(st, uiop, off + st->grd_ureclen));
}

/*
 * gfs_readdir_emitn: like gfs_readdir_emit(), but takes an integer
 * instead of a string for the entry's name.
 */
int
gfs_readdir_emitn(gfs_readdir_state_t *st, uio_t *uiop, offset_t voff,
    ino64_t ino, unsigned long num)
{
	char buf[40];

	numtos(num, buf);
	return (gfs_readdir_emit(st, uiop, voff, ino, buf));
}

/*
 * gfs_readdir_pred: readdir loop predicate
 *   voffp - a pointer in which the next virtual offset should be stored
 *
 * Returns 0 on success, a non-zero errno on failure, or -1 if the
 * readdir loop should terminate.  A non-zero result (either errno or
 * -1) from this function is typically passed directly to
 * gfs_readdir_fini().
 */
int
gfs_readdir_pred(gfs_readdir_state_t *st, uio_t *uiop, offset_t *voffp)
{
	offset_t off, voff;
	int error;

top:
	if (uiop->uio_resid <= 0)
		return (-1);

	off = uiop->uio_loffset / st->grd_ureclen;
	voff = off - 2;
	if (off == 0) {
		if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_self,
		    ".")) == 0)
			goto top;
	} else if (off == 1) {
		if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_parent,
		    "..")) == 0)
			goto top;
	} else {
		*voffp = voff;
		return (0);
	}

	return (error);
}

/*
 * gfs_readdir_fini: generic readdir cleanup
 *   error	- if positive, an error to return
 *   eofp	- the eofp passed to readdir
 *   eof	- the eof value
 *
 * Returns 0 on success, a non-zero errno on failure.  This result
 * should be returned from readdir.
 */
int
gfs_readdir_fini(gfs_readdir_state_t *st, int error, int *eofp, int eof)
{
	kmem_free(st->grd_dirent, DIRENT64_RECLEN(st->grd_namlen));
	if (error > 0)
		return (error);
	if (eofp)
		*eofp = eof;
	return (0);
}

/*
 * gfs_lookup_dot
 *
 * Performs a basic check for "." and ".." directory entries.
 */
int
gfs_lookup_dot(vnode_t **vpp, vnode_t *dvp, vnode_t *pvp, const char *nm)
{
	if (*nm == '\0' || strcmp(nm, ".") == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	} else if (strcmp(nm, "..") == 0) {
		if (pvp == NULL) {
			ASSERT(dvp->v_flag & VROOT);
			VN_HOLD(dvp);
			*vpp = dvp;
		} else {
			VN_HOLD(pvp);
			*vpp = pvp;
		}
		return (0);
	}

	return (-1);
}

/*
 * gfs_file_create(): create a new GFS file
 *
 *   size	- size of private data structure (v_data)
 *   pvp	- parent vnode (GFS directory)
 *   ops	- vnode operations vector
 *
 * In order to use this interface, the parent vnode must have been created by
 * gfs_dir_create(), and the private data stored in v_data must have a
 * 'gfs_file_t' as its first field.
 *
 * Given these constraints, this routine will automatically:
 *
 * 	- Allocate v_data for the vnode
 * 	- Initialize necessary fields in the vnode
 * 	- Hold the parent
 */
vnode_t *
gfs_file_create(size_t size, vnode_t *pvp, vnodeops_t *ops)
{
	gfs_file_t *fp;
	vnode_t *vp;

	/*
	 * Allocate vnode and internal data structure
	 */
	fp = kmem_zalloc(size, KM_SLEEP);
	vp = vn_alloc(KM_SLEEP);

	/*
	 * Set up various pointers
	 */
	fp->gfs_vnode = vp;
	fp->gfs_parent = pvp;
	vp->v_data = fp;
	fp->gfs_size = size;
	fp->gfs_type = GFS_FILE;

	/*
	 * Initialize vnode and hold parent.
	 */
	vn_setops(vp, ops);
	if (pvp) {
		VN_SET_VFS_TYPE_DEV(vp, pvp->v_vfsp, VREG, 0);
		VN_HOLD(pvp);
	}

	return (vp);
}

/*
 * gfs_dir_create: creates a new directory in the parent
 *
 *   size	- size of private data structure (v_data)
 *   pvp	- parent vnode (GFS directory)
 *   ops	- vnode operations vector
 *   entries	- NULL-terminated list of static entries (if any)
 *   inode_cb	- inode callback (see gfs_dir_readdir)
 *   maxlen	- maximum length of a directory entry
 *   readdir_cb	- readdir callback (see gfs_dir_readdir)
 *   lookup_cb	- lookup callback (see gfs_dir_lookup)
 *
 * In order to use this function, the first member of the private vnode
 * structure (v_data) must be a gfs_dir_t.  For each directory, there are
 * static entries, defined when the structure is initialized, and dynamic
 * entries, retrieved through callbacks.
 *
 * If a directory has static entries, then it must supply an inode callback,
 * which will compute the inode number based on the parent and the index.
 * For a directory with dynamic entries, the caller must supply a readdir
 * callback and a lookup callback.  If a static lookup fails, we fall back to
 * the supplied lookup callback, if any.
 *
 * This function also performs the same initialization as gfs_file_create().
 * (An illustrative static entry table follows the function below.)
 */
vnode_t *
gfs_dir_create(size_t struct_size, vnode_t *pvp, vnodeops_t *ops,
    gfs_dirent_t *entries, gfs_inode_cb inode_cb, int maxlen,
    gfs_readdir_cb readdir_cb, gfs_lookup_cb lookup_cb)
{
	vnode_t *vp;
	gfs_dir_t *dp;
	gfs_dirent_t *de;

	vp = gfs_file_create(struct_size, pvp, ops);
	vp->v_type = VDIR;

	dp = vp->v_data;
	dp->gfsd_file.gfs_type = GFS_DIR;
	dp->gfsd_maxlen = maxlen;

	if (entries != NULL) {
		for (de = entries; de->gfse_name != NULL; de++)
			dp->gfsd_nstatic++;

		dp->gfsd_static = kmem_alloc(
		    dp->gfsd_nstatic * sizeof (gfs_dirent_t), KM_SLEEP);
		bcopy(entries, dp->gfsd_static,
		    dp->gfsd_nstatic * sizeof (gfs_dirent_t));
	}

	dp->gfsd_readdir = readdir_cb;
	dp->gfsd_lookup = lookup_cb;
	dp->gfsd_inode = inode_cb;

	mutex_init(&dp->gfsd_lock, NULL, MUTEX_DEFAULT, NULL);

	return (vp);
}
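
/*
 * A sketch of a static entry table (all 'mypseudo' names hypothetical;
 * field order per gfs_dirent_t: name, constructor, flags):
 *
 *	static gfs_dirent_t mypseudo_entries[] = {
 *		{ "status",  mypseudo_status_ctor,  GFS_CACHE_VNODE },
 *		{ "control", mypseudo_control_ctor, 0 },
 *		{ NULL }
 *	};
 *
 *	vp = gfs_dir_create(sizeof (mypseudo_node_t), pvp, mypseudo_dir_ops,
 *	    mypseudo_entries, mypseudo_inode_cb, MAXNAMELEN, NULL, NULL);
 *
 * Each gfse_ctor callback receives the directory vnode and returns a held
 * vnode for the entry, typically built with gfs_file_create().  With
 * GFS_CACHE_VNODE set, the constructed vnode is cached in the entry until
 * its last reference is released.
 */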

/*
 * gfs_root_create(): create a root vnode for a GFS filesystem
 *
 * Similar to gfs_dir_create(), this creates a root vnode for a filesystem.  The
 * only difference is that it takes a vfs_t instead of a vnode_t as its parent.
 */
vnode_t *
gfs_root_create(size_t size, vfs_t *vfsp, vnodeops_t *ops, ino64_t ino,
    gfs_dirent_t *entries, gfs_inode_cb inode_cb, int maxlen,
    gfs_readdir_cb readdir_cb, gfs_lookup_cb lookup_cb)
{
	vnode_t *vp = gfs_dir_create(size, NULL, ops, entries, inode_cb,
	    maxlen, readdir_cb, lookup_cb);

	/* Manually set the inode */
	((gfs_file_t *)vp->v_data)->gfs_ino = ino;

	VFS_HOLD(vfsp);
	VN_SET_VFS_TYPE_DEV(vp, vfsp, VDIR, 0);
	vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT;

	return (vp);
}
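
/*
 * A mount routine might use it roughly as follows (hypothetical names;
 * MYPSEUDO_ROOT_INO is an assumed constant):
 *
 *	vp = gfs_root_create(sizeof (mypseudo_node_t), vfsp,
 *	    mypseudo_dir_ops, MYPSEUDO_ROOT_INO, mypseudo_entries,
 *	    mypseudo_inode_cb, MAXNAMELEN, NULL, NULL);
 *
 * The returned root vnode would then typically be remembered in
 * vfsp->vfs_data (or similar) so that the filesystem's VFS_ROOT()
 * implementation can hold and return it.
 */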

/*
 * gfs_root_create_file(): create a root vnode for a GFS file as a filesystem
 *
 * Similar to gfs_root_create(), this creates a root vnode for a file to
 * be the pseudo-filesystem.
 */
vnode_t *
gfs_root_create_file(size_t size, vfs_t *vfsp, vnodeops_t *ops, ino64_t ino)
{
	vnode_t	*vp = gfs_file_create(size, NULL, ops);

	((gfs_file_t *)vp->v_data)->gfs_ino = ino;

	VFS_HOLD(vfsp);
	VN_SET_VFS_TYPE_DEV(vp, vfsp, VREG, 0);
	vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT;

	return (vp);
}

/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine.  If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned.  Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(vnode_t *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vp->v_data;
	gfs_dir_t *dp = NULL;
	void *data;

	if (fp->gfs_parent == NULL || (vp->v_flag & V_XATTRDIR))
		goto found;

	dp = fp->gfs_parent->v_data;

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++) {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * No static entry matched, so this is a dynamic entry; clear 'ge'
	 * to record that.
	 */
	ge = NULL;

found:
	if (vp->v_flag & V_XATTRDIR) {
		mutex_enter(&fp->gfs_parent->v_lock);
	}
	mutex_enter(&vp->v_lock);
	if (vp->v_count == 1) {
		/*
		 * Really remove this vnode
		 */
		data = vp->v_data;
		if (ge != NULL) {
			/*
			 * If this was a statically cached entry, simply set the
			 * cached vnode to NULL.
			 */
			ge->gfse_vnode = NULL;
		}
		if (vp->v_flag & V_XATTRDIR) {
			fp->gfs_parent->v_xattrdir = NULL;
			mutex_exit(&fp->gfs_parent->v_lock);
		}
		mutex_exit(&vp->v_lock);

		/*
		 * Free vnode and release parent
		 */
		if (fp->gfs_parent) {
			if (dp) {
				gfs_dir_unlock(dp);
			}
			VN_RELE(fp->gfs_parent);
		} else {
			ASSERT(vp->v_vfsp != NULL);
			VFS_RELE(vp->v_vfsp);
		}
		vn_free(vp);
	} else {
		vp->v_count--;
		data = NULL;
		mutex_exit(&vp->v_lock);
		if (vp->v_flag & V_XATTRDIR) {
			mutex_exit(&fp->gfs_parent->v_lock);
		}
		if (dp)
			gfs_dir_unlock(dp);
	}

	return (data);
}

/*
 * gfs_dir_inactive()
 *
 * Same as above, but for directories.
 */
void *
gfs_dir_inactive(vnode_t *vp)
{
	gfs_dir_t *dp;

	ASSERT(vp->v_type == VDIR);

	if ((dp = gfs_file_inactive(vp)) != NULL) {
		mutex_destroy(&dp->gfsd_lock);
		if (dp->gfsd_nstatic)
			kmem_free(dp->gfsd_static,
			    dp->gfsd_nstatic * sizeof (gfs_dirent_t));
	}

	return (dp);
}

/*
 * gfs_dir_lookup()
 *
 * Looks up the given name in the directory and returns the corresponding vnode,
 * if found.
 *
 * First, we search statically defined entries, if any.  If a match is found,
 * and GFS_CACHE_VNODE is set and the vnode exists, we simply return the
 * existing vnode.  Otherwise, we call the static entry's callback routine,
 * caching the result if necessary.
 *
 * If no static entry is found, we invoke the lookup callback, if any.  The
 * arguments to this callback are:
 *
 * int gfs_lookup_cb(vnode_t *pvp, const char *nm, vnode_t **vpp,
 *     ino64_t *inop, cred_t *cr);
 *
 *	pvp	- parent vnode
 *	nm	- name of entry
 *	vpp	- pointer to resulting vnode
 *	inop	- pointer in which to return the entry's inode number
 *	cr	- pointer to cred
 *
 * 	Returns 0 on success, non-zero on error.
 */
int
gfs_dir_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp, cred_t *cr)
{
	int i;
	gfs_dirent_t *ge;
	vnode_t *vp;
	gfs_dir_t *dp = dvp->v_data;
	int ret = 0;

	ASSERT(dvp->v_type == VDIR);

	if (gfs_lookup_dot(vpp, dvp, dp->gfsd_file.gfs_parent, nm) == 0)
		return (0);

	gfs_dir_lock(dp);

	/*
	 * Search static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++) {
		ge = &dp->gfsd_static[i];

		if (strcmp(ge->gfse_name, nm) == 0) {
			if (ge->gfse_vnode) {
				ASSERT(ge->gfse_flags & GFS_CACHE_VNODE);
				vp = ge->gfse_vnode;
				VN_HOLD(vp);
				goto out;
			}

			/*
			 * We drop the directory lock, as the constructor will
			 * need to do KM_SLEEP allocations.  If we return from
			 * the constructor only to find that a parallel
			 * operation has completed, and GFS_CACHE_VNODE is set
			 * for this entry, we discard the result in favor of the
			 * cached vnode.
			 */
			gfs_dir_unlock(dp);
			vp = ge->gfse_ctor(dvp);
			gfs_dir_lock(dp);

			((gfs_file_t *)vp->v_data)->gfs_index = i;

			/* Set the inode according to the callback. */
			((gfs_file_t *)vp->v_data)->gfs_ino =
			    dp->gfsd_inode(dvp, i);

			if (ge->gfse_flags & GFS_CACHE_VNODE) {
				if (ge->gfse_vnode == NULL) {
					ge->gfse_vnode = vp;
				} else {
					/*
					 * A parallel constructor beat us to it;
					 * return existing vnode.  We have to be
					 * careful because we can't release the
					 * current vnode while holding the
					 * directory lock; its inactive routine
					 * will try to lock this directory.
					 */
					vnode_t *oldvp = vp;
					vp = ge->gfse_vnode;
					VN_HOLD(vp);

					gfs_dir_unlock(dp);
					VN_RELE(oldvp);
					gfs_dir_lock(dp);
				}
			}

			goto out;
		}
	}

	/*
	 * See if there is a dynamic constructor.
	 */
	if (dp->gfsd_lookup) {
		ino64_t ino;
		gfs_file_t *fp;

		/*
		 * Once again, drop the directory lock, as the lookup routine
		 * will need to allocate memory, or otherwise deadlock on this
		 * directory.
		 */
		gfs_dir_unlock(dp);
		ret = dp->gfsd_lookup(dvp, nm, &vp, &ino, cr);
		gfs_dir_lock(dp);
		if (ret != 0)
			goto out;

		/*
		 * The lookup_cb might be returning a non-GFS vnode.
		 * Currently this is true for extended attributes,
		 * where we're returning a vnode with v_data from an
		 * underlying fs.
		 */
		if ((dvp->v_flag & V_XATTRDIR) == 0) {
			fp = (gfs_file_t *)vp->v_data;
			fp->gfs_index = -1;
			fp->gfs_ino = ino;
		}
	} else {
		/*
		 * No static entry found, and there is no lookup callback, so
		 * return ENOENT.
		 */
		ret = ENOENT;
	}

out:
	gfs_dir_unlock(dp);

	if (ret == 0)
		*vpp = vp;
	else
		*vpp = NULL;

	return (ret);
}
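
/*
 * A minimal dynamic lookup callback might look like the following sketch
 * (hypothetical names; error handling elided):
 *
 *	static int
 *	mypseudo_lookup_cb(vnode_t *pvp, const char *nm, vnode_t **vpp,
 *	    ino64_t *inop, cred_t *cr)
 *	{
 *		uint_t id;
 *
 *		if (mypseudo_name_to_id(nm, &id) != 0)
 *			return (ENOENT);
 *
 *		*vpp = mypseudo_node_create(pvp, id);	(returns held vnode)
 *		*inop = MYPSEUDO_INO(id);
 *		return (0);
 *	}
 *
 * Note that gfs_dir_lookup() drops the directory lock around this call, so
 * the callback may perform KM_SLEEP allocations.
 */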

/*
 * gfs_dir_readdir: does a readdir() on the given directory
 *
 *    dvp	- directory vnode
 *    uiop	- uio structure
 *    eofp	- eof pointer
 *    data	- arbitrary data passed to readdir callback
 *
 * This routine does all the readdir() dirty work.  Even so, the caller must
 * supply two callbacks in order to get full compatibility.
 *
 * If the directory contains static entries, an inode callback must be
 * specified.  This avoids having to create every vnode and call VOP_GETATTR()
 * when reading the directory.  This function has the following arguments:
 *
 *	ino64_t gfs_inode_cb(vnode_t *vp, int index);
 *
 * 	vp	- vnode for the directory
 * 	index	- index in original gfs_dirent_t array
 *
 * 	Returns the inode number for the given entry.
 *
 * For directories with dynamic entries, a readdir callback must be provided.
 * This is significantly more complex, thanks to the particulars of
 * VOP_READDIR().
 *
 *	int gfs_readdir_cb(vnode_t *vp, struct dirent64 *dp, int *eofp,
 *	    offset_t *off, offset_t *nextoff, void *data)
 *
 *	vp	- directory vnode
 *	dp	- directory entry, sized according to maxlen given to
 *		  gfs_dir_create().  callback must fill in d_name and
 *		  d_ino.
 *	eofp	- callback must set to 1 when EOF has been reached
 *	off	- on entry, the last offset read from the directory.  Callback
 *		  must set to the offset of the current entry, typically left
 *		  untouched.
 *	nextoff	- callback must set to offset of next entry.  Typically
 *		  (off + 1)
 *	data	- caller-supplied data
 *
 *	Returns 0 on success, or error on failure.
 */
int
gfs_dir_readdir(vnode_t *dvp, uio_t *uiop, int *eofp, void *data, cred_t *cr,
    caller_context_t *ct)
{
	gfs_readdir_state_t gstate;
	int error, eof = 0;
	ino64_t ino, pino;
	offset_t off, next;
	gfs_dir_t *dp = dvp->v_data;
	vnode_t *parent;

	ino = dp->gfsd_file.gfs_ino;
	parent = dp->gfsd_file.gfs_parent;

	if (parent == NULL)
		pino = ino;		/* root of filesystem */
	else if (dvp->v_flag & V_XATTRDIR) {
		vattr_t va;

		va.va_mask = AT_NODEID;
		error = VOP_GETATTR(parent, &va, 0, cr, ct);
		if (error)
			return (error);
		pino = va.va_nodeid;
	} else
		pino = ((gfs_file_t *)(parent->v_data))->gfs_ino;

	if ((error = gfs_readdir_init(&gstate, dp->gfsd_maxlen, 1, uiop,
	    pino, ino)) != 0)
		return (error);

	while ((error = gfs_readdir_pred(&gstate, uiop, &off)) == 0 &&
	    !eof) {

		if (off >= 0 && off < dp->gfsd_nstatic) {
			ino = dp->gfsd_inode(dvp, off);

			if ((error = gfs_readdir_emit(&gstate, uiop,
			    off, ino, dp->gfsd_static[off].gfse_name))
			    != 0)
				break;

		} else if (dp->gfsd_readdir) {
			off -= dp->gfsd_nstatic;

			if ((error = dp->gfsd_readdir(dvp,
			    gstate.grd_dirent, &eof, &off, &next,
			    data)) != 0 || eof)
				break;

			off += dp->gfsd_nstatic + 2;
			next += dp->gfsd_nstatic + 2;

			if ((error = gfs_readdir_emit_int(&gstate, uiop,
			    next)) != 0)
				break;
		} else {
			/*
			 * Offset is beyond the end of the static entries, and
			 * we have no dynamic entries.  Set EOF.
			 */
			eof = 1;
		}
	}

	return (gfs_readdir_fini(&gstate, error, eofp, eof));
}
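
/*
 * For illustration, a dynamic readdir callback over a dense, zero-based
 * set of entries might look like this (hypothetical names; assumes the
 * directory was created with a maxlen of MAXNAMELEN):
 *
 *	static int
 *	mypseudo_readdir_cb(vnode_t *vp, struct dirent64 *dp, int *eofp,
 *	    offset_t *offp, offset_t *nextp, void *data)
 *	{
 *		if (*offp >= mypseudo_nentries(vp)) {
 *			*eofp = 1;
 *			return (0);
 *		}
 *
 *		dp->d_ino = MYPSEUDO_INO(*offp);
 *		(void) snprintf(dp->d_name, MAXNAMELEN, "%lld",
 *		    (longlong_t)*offp);
 *		*nextp = *offp + 1;
 *		return (0);
 *	}
 *
 * gfs_dir_readdir() takes care of ".", "..", translating offsets past the
 * static entries, and copying each entry out to the caller's uio.
 */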


/*
 * gfs_vop_lookup: VOP_LOOKUP() entry point
 *
 * For use directly in vnode ops table.  Given a GFS directory, calls
 * gfs_dir_lookup() as necessary.
 */
/* ARGSUSED */
int
gfs_vop_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	return (gfs_dir_lookup(dvp, nm, vpp, cr));
}

/*
 * gfs_vop_readdir: VOP_READDIR() entry point
 *
 * For use directly in vnode ops table.  Given a GFS directory, calls
 * gfs_dir_readdir() as necessary.
 */
/* ARGSUSED */
int
gfs_vop_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp,
    caller_context_t *ct, int flags)
{
	return (gfs_dir_readdir(vp, uiop, eofp, NULL, cr, ct));
}


/*
 * gfs_vop_map: VOP_MAP() entry point
 *
 * Convenience routine for handling pseudo-files that wish to allow mmap()
 * calls.  This function only works for read-only files, and uses the read
 * function for the vnode to fill in the data.  The mapped data is immediately
 * faulted in and filled with the necessary data during this call; there are no
 * getpage() or putpage() routines.
 */
/* ARGSUSED */
int
gfs_vop_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cred,
    caller_context_t *ct)
{
	int rv;
	ssize_t resid = len;

	/*
	 * Check for bad parameters
	 */
#ifdef _ILP32
	if (len > MAXOFF_T)
		return (ENOMEM);
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOTSUP);
	if (off > MAXOFF_T)
		return (EFBIG);
	if ((long)off < 0 || (long)(off + len) < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (ENODEV);
	if ((prot & (PROT_EXEC | PROT_WRITE)) != 0)
		return (EACCES);

	/*
	 * Find appropriate address if needed, otherwise clear address range.
	 */
	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, (offset_t)off, 1, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		(void) as_unmap(as, *addrp, len);
	}

	/*
	 * Create mapping
	 */
	rv = as_map(as, *addrp, len, segvn_create, zfod_argsp);
	as_rangeunlock(as);
	if (rv != 0)
		return (rv);

	/*
	 * Fill with data from read()
	 */
	rv = vn_rdwr(UIO_READ, vp, *addrp, len, off, UIO_USERSPACE,
	    0, (rlim64_t)0, cred, &resid);

	if (rv == 0 && resid != 0)
		rv = ENXIO;

	if (rv != 0) {
		as_rangelock(as);
		(void) as_unmap(as, *addrp, len);
		as_rangeunlock(as);
	}

	return (rv);
}

/*
 * gfs_vop_inactive: VOP_INACTIVE() entry point
 *
 * Given a vnode that is a GFS file or directory, calls gfs_file_inactive() or
 * gfs_dir_inactive() as necessary, and kmem_free()s the associated private
 * data.
 */
/* ARGSUSED */
void
gfs_vop_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	gfs_file_t *fp = vp->v_data;
	void *data;

	if (fp->gfs_type == GFS_DIR)
		data = gfs_dir_inactive(vp);
	else
		data = gfs_file_inactive(vp);

	if (data != NULL)
		kmem_free(data, fp->gfs_size);
}