/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dirent.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/vfs.h>
#include <sys/vnode.h>

#include <vm/as.h>
#include <vm/seg_vn.h>

#include <sys/gfs.h>

/*
 * Generic pseudo-filesystem routines.
 *
 * There are significant similarities between the implementation of certain file
 * system entry points across different filesystems.  While one could attempt to
 * "choke up on the bat" and incorporate common functionality into a VOP
 * preamble or postamble, such an approach is limited in the benefit it can
 * provide.  In this file we instead define a toolkit of routines which can be
 * called from a filesystem (with in-kernel pseudo-filesystems being the focus
 * of the exercise) in a more component-like fashion.
 *
 * There are two basic classes of routines:
 *
 * 1) Lowlevel support routines
 *
 *    These routines are designed to play a support role for existing
 *    pseudo-filesystems (such as procfs).  They simplify common tasks,
 *    without forcing the filesystem to hand over management to GFS.  The
 *    routines covered are:
 *
 *	gfs_readdir_init()
 *	gfs_readdir_emit()
 *	gfs_readdir_emitn()
 *	gfs_readdir_pred()
 *	gfs_readdir_fini()
 *	gfs_lookup_dot()
 *
 * 2) Complete GFS management
 *
 *    These routines take a more active role in management of the
 *    pseudo-filesystem.  They handle the relationship between vnode private
 *    data and VFS data, as well as the relationship between vnodes in the
 *    directory hierarchy.
 *
 *    In order to use these interfaces, the first member of every private
 *    v_data must be a gfs_file_t or a gfs_dir_t.  This hands over all control
 *    to GFS.
 *
 * 	gfs_file_create()
 * 	gfs_dir_create()
 * 	gfs_root_create()
 *
 *	gfs_file_inactive()
 *	gfs_dir_inactive()
 *	gfs_dir_lookup()
 *	gfs_dir_readdir()
 *
 * 	gfs_vop_inactive()
 * 	gfs_vop_lookup()
 * 	gfs_vop_readdir()
 * 	gfs_vop_map()
 */

/*
 * gfs_make_opsvec: take an array of vnode type definitions and create
 * their vnodeops_t structures
 *
 * This routine takes an array of gfs_opsvec_t's.  It could
 * alternatively take an array of gfs_opsvec_t*'s, which would allow
 * vnode types to be completely defined in files external to the caller
 * of gfs_make_opsvec().  As it stands, much more sharing takes place --
 * both the caller and the vnode type provider need to access gfsv_ops
 * and gfsv_template, and the caller also needs to know gfsv_name.
 */
int
gfs_make_opsvec(gfs_opsvec_t *vec)
{
	int error, i;

	for (i = 0; ; i++) {
		if (vec[i].gfsv_name == NULL)
			return (0);
		error = vn_make_ops(vec[i].gfsv_name, vec[i].gfsv_template,
		    vec[i].gfsv_ops);
		if (error)
			break;
	}

	cmn_err(CE_WARN, "gfs_make_opsvec: bad vnode ops template for '%s'",
	    vec[i].gfsv_name);
	for (i--; i >= 0; i--) {
		vn_freevnodeops(*vec[i].gfsv_ops);
		*vec[i].gfsv_ops = NULL;
	}
	return (error);
}

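/*
 * As an illustration, a consumer might use gfs_make_opsvec() roughly as
 * follows.  This is only a sketch; the "myfs" names and template contents
 * are hypothetical, and the terminating element must have a NULL gfsv_name:
 *
 *	static vnodeops_t *myfs_dir_ops;
 *
 *	static const fs_operation_def_t myfs_dir_tmpl[] = {
 *		{ VOPNAME_LOOKUP, gfs_vop_lookup },
 *		{ VOPNAME_READDIR, gfs_vop_readdir },
 *		{ VOPNAME_INACTIVE, (fs_generic_func_p) gfs_vop_inactive },
 *		{ NULL }
 *	};
 *
 *	static gfs_opsvec_t myfs_opsvec[] = {
 *		{ "myfs dir", myfs_dir_tmpl, &myfs_dir_ops },
 *		{ NULL }
 *	};
 *
 *	if ((error = gfs_make_opsvec(myfs_opsvec)) != 0)
 *		return (error);
 *
 * On failure, gfs_make_opsvec() warns, frees any vnodeops it has already
 * created, and NULLs out the corresponding gfsv_ops pointers.
 */
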
/*
 * Low level directory routines
 *
 * These routines provide some simple abstractions for reading directories.
 * They are designed to be used by existing pseudo filesystems (namely procfs)
 * that already have a complicated management infrastructure.
 */

/*
 * gfs_readdir_init: initiate a generic readdir
 *   st		- a pointer to an uninitialized gfs_readdir_state_t structure
 *   name_max	- the directory's maximum file name length
 *   ureclen	- the exported file-space record length (1 for non-legacy FSs)
 *   uiop	- the uiop passed to readdir
 *   parent	- the parent directory's inode
 *   self	- this directory's inode
 *
 * Returns 0 or a non-zero errno.
 *
 * Typical VOP_READDIR usage of gfs_readdir_*:
 *
 *	if ((error = gfs_readdir_init(...)) != 0)
 *		return (error);
 *	eof = 0;
 *	while ((error = gfs_readdir_pred(..., &voffset)) == 0) {
 *		if (!consumer_entry_at(voffset))
 *			voffset = consumer_next_entry(voffset);
 *		if (consumer_eof(voffset)) {
 *			eof = 1;
 *			break;
 *		}
 *		if ((error = gfs_readdir_emit(..., voffset,
 *		    consumer_ino(voffset), consumer_name(voffset))) != 0)
 *			break;
 *	}
 *	return (gfs_readdir_fini(..., error, eofp, eof));
 *
 * As you can see, a zero result from gfs_readdir_pred() or
 * gfs_readdir_emit() indicates that processing should continue,
 * whereas a non-zero result indicates that the loop should terminate.
 * Most consumers need do nothing more than let gfs_readdir_fini()
 * determine what the cause of failure was and return the appropriate
 * value.
 */
int
gfs_readdir_init(gfs_readdir_state_t *st, int name_max, int ureclen,
    uio_t *uiop, ino64_t parent, ino64_t self)
{
	if (uiop->uio_loffset < 0 || uiop->uio_resid <= 0 ||
	    (uiop->uio_loffset % ureclen) != 0)
		return (EINVAL);

	st->grd_ureclen = ureclen;
	st->grd_oresid = uiop->uio_resid;
	st->grd_namlen = name_max;
	st->grd_dirent = kmem_zalloc(DIRENT64_RECLEN(st->grd_namlen), KM_SLEEP);
	st->grd_parent = parent;
	st->grd_self = self;

	return (0);
}

/*
 * gfs_readdir_emit_int: internal routine to emit directory entry
 *
 *   st		- the current readdir state, which must have d_ino and d_name
 *                set
 *   uiop	- caller-supplied uio pointer
 *   next	- the offset of the next entry
 */
static int
gfs_readdir_emit_int(gfs_readdir_state_t *st, uio_t *uiop, offset_t next)
{
	int reclen;

	reclen = DIRENT64_RECLEN(strlen(st->grd_dirent->d_name));

	if (reclen > uiop->uio_resid) {
		/*
		 * Error if no entries were returned yet
		 */
		if (uiop->uio_resid == st->grd_oresid)
			return (EINVAL);
		return (-1);
	}

	st->grd_dirent->d_off = next;
	st->grd_dirent->d_reclen = (ushort_t)reclen;

	if (uiomove((caddr_t)st->grd_dirent, reclen, UIO_READ, uiop))
		return (EFAULT);

	uiop->uio_loffset = next;

	return (0);
}

/*
 * gfs_readdir_emit: emit a directory entry
 *   voff       - the virtual offset (obtained from gfs_readdir_pred)
 *   ino        - the entry's inode
 *   name       - the entry's name
 *
 * Returns a 0 on success, a non-zero errno on failure, or -1 if the
 * readdir loop should terminate.  A non-zero result (either errno or
 * -1) from this function is typically passed directly to
 * gfs_readdir_fini().
 */
int
gfs_readdir_emit(gfs_readdir_state_t *st, uio_t *uiop, offset_t voff,
    ino64_t ino, const char *name)
{
	offset_t off = (voff + 2) * st->grd_ureclen;

	st->grd_dirent->d_ino = ino;
	(void) strncpy(st->grd_dirent->d_name, name, st->grd_namlen);

	/*
	 * Inter-entry offsets are invalid, so we assume a record size of
	 * grd_ureclen and explicitly set the offset appropriately.
	 */
	return (gfs_readdir_emit_int(st, uiop, off + st->grd_ureclen));
}

/*
 * gfs_readdir_emitn: like gfs_readdir_emit(), but takes an integer
 * instead of a string for the entry's name.
 */
int
gfs_readdir_emitn(gfs_readdir_state_t *st, uio_t *uiop, offset_t voff,
    ino64_t ino, unsigned long num)
{
	char buf[40];

	numtos(num, buf);
	return (gfs_readdir_emit(st, uiop, voff, ino, buf));
}

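/*
 * For example, a procfs-style directory whose entries are numeric IDs can
 * emit each entry without formatting the name itself (a sketch;
 * myfs_id_to_ino() is a hypothetical helper):
 *
 *	if ((error = gfs_readdir_emitn(&gstate, uiop, voffset,
 *	    myfs_id_to_ino(id), (unsigned long)id)) != 0)
 *		break;
 */
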
/*
 * gfs_readdir_pred: readdir loop predicate
 *   voffp - a pointer in which the next virtual offset should be stored
 *
 * Returns a 0 on success, a non-zero errno on failure, or -1 if the
 * readdir loop should terminate.  A non-zero result (either errno or
 * -1) from this function is typically passed directly to
 * gfs_readdir_fini().
 */
int
gfs_readdir_pred(gfs_readdir_state_t *st, uio_t *uiop, offset_t *voffp)
{
	offset_t off, voff;
	int error;

top:
	if (uiop->uio_resid <= 0)
		return (-1);

	off = uiop->uio_loffset / st->grd_ureclen;
	voff = off - 2;
	if (off == 0) {
		if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_self,
		    ".")) == 0)
			goto top;
	} else if (off == 1) {
		if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_parent,
		    "..")) == 0)
			goto top;
	} else {
		*voffp = voff;
		return (0);
	}

	return (error);
}

/*
 * gfs_readdir_fini: generic readdir cleanup
 *   error	- if positive, an error to return
 *   eofp	- the eofp passed to readdir
 *   eof	- the eof value
 *
 * Returns a 0 on success, a non-zero errno on failure.  This result
 * should be returned from readdir.
 */
int
gfs_readdir_fini(gfs_readdir_state_t *st, int error, int *eofp, int eof)
{
	kmem_free(st->grd_dirent, DIRENT64_RECLEN(st->grd_namlen));
	if (error > 0)
		return (error);
	if (eofp)
		*eofp = eof;
	return (0);
}

/*
 * gfs_lookup_dot
 *
 * Performs a basic check for "." and ".." directory entries.
 */
int
gfs_lookup_dot(vnode_t **vpp, vnode_t *dvp, vnode_t *pvp, const char *nm)
{
	if (*nm == '\0' || strcmp(nm, ".") == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	} else if (strcmp(nm, "..") == 0) {
		if (pvp == NULL) {
			ASSERT(dvp->v_flag & VROOT);
			VN_HOLD(dvp);
			*vpp = dvp;
		} else {
			VN_HOLD(pvp);
			*vpp = pvp;
		}
		return (0);
	}

	return (-1);
}

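/*
 * A filesystem-specific lookup routine would typically try this first and
 * fall back to its own name resolution only when -1 is returned (a sketch;
 * how the parent vnode 'pvp' is obtained is up to the caller):
 *
 *	if (gfs_lookup_dot(vpp, dvp, pvp, nm) == 0)
 *		return (0);
 *	... resolve 'nm' by other means ...
 */
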
/*
 * gfs_file_create(): create a new GFS file
 *
 *   size	- size of private data structure (v_data)
 *   pvp	- parent vnode (GFS directory)
 *   ops	- vnode operations vector
 *
 * In order to use this interface, the parent vnode must have been created by
 * gfs_dir_create(), and the private data stored in v_data must have a
 * 'gfs_file_t' as its first field.
 *
 * Given these constraints, this routine will automatically:
 *
 * 	- Allocate v_data for the vnode
 * 	- Initialize necessary fields in the vnode
 * 	- Hold the parent
 */
vnode_t *
gfs_file_create(size_t size, vnode_t *pvp, vnodeops_t *ops)
{
	gfs_file_t *fp;
	vnode_t *vp;

	/*
	 * Allocate vnode and internal data structure
	 */
	fp = kmem_zalloc(size, KM_SLEEP);
	vp = vn_alloc(KM_SLEEP);

	/*
	 * Set up various pointers
	 */
	fp->gfs_vnode = vp;
	fp->gfs_parent = pvp;
	vp->v_data = fp;
	fp->gfs_size = size;
	fp->gfs_type = GFS_FILE;

	/*
	 * Initialize vnode and hold parent.
	 */
	vn_setops(vp, ops);
	if (pvp) {
		VN_SET_VFS_TYPE_DEV(vp, pvp->v_vfsp, VREG, 0);
		VN_HOLD(pvp);
	}

	return (vp);
}

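/*
 * A private structure for use with this interface might look like the
 * following (a sketch; the myfs names are hypothetical).  The embedded
 * gfs_file_t must be the first member so that GFS can treat v_data as a
 * gfs_file_t:
 *
 *	typedef struct myfs_node {
 *		gfs_file_t	myfs_gfs;	(must be first)
 *		uint_t		myfs_flags;	(filesystem-specific fields)
 *	} myfs_node_t;
 *
 *	vp = gfs_file_create(sizeof (myfs_node_t), pvp, myfs_file_ops);
 */
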
/*
 * gfs_dir_create: creates a new directory in the parent
 *
 *   size	- size of private data structure (v_data)
 *   pvp	- parent vnode (GFS directory)
 *   ops	- vnode operations vector
 *   entries	- NULL-terminated list of static entries (if any)
 *   maxlen	- maximum length of a directory entry
 *   readdir_cb	- readdir callback (see gfs_dir_readdir)
 *   inode_cb	- inode callback (see gfs_dir_readdir)
 *   lookup_cb	- lookup callback (see gfs_dir_lookup)
 *
 * In order to use this function, the first member of the private vnode
 * structure (v_data) must be a gfs_dir_t.  For each directory, there are
 * static entries, defined when the structure is initialized, and dynamic
 * entries, retrieved through callbacks.
 *
 * If a directory has static entries, then it must supply an inode callback,
 * which will compute the inode number based on the parent and the index.
 * For a directory with dynamic entries, the caller must supply a readdir
 * callback and a lookup callback.  If a static lookup fails, we fall back to
 * the supplied lookup callback, if any.
 *
 * This function also performs the same initialization as gfs_file_create().
 */
vnode_t *
gfs_dir_create(size_t struct_size, vnode_t *pvp, vnodeops_t *ops,
    gfs_dirent_t *entries, gfs_inode_cb inode_cb, int maxlen,
    gfs_readdir_cb readdir_cb, gfs_lookup_cb lookup_cb)
{
	vnode_t *vp;
	gfs_dir_t *dp;
	gfs_dirent_t *de;

	vp = gfs_file_create(struct_size, pvp, ops);
	vp->v_type = VDIR;

	dp = vp->v_data;
	dp->gfsd_file.gfs_type = GFS_DIR;
	dp->gfsd_maxlen = maxlen;

	if (entries != NULL) {
		for (de = entries; de->gfse_name != NULL; de++)
			dp->gfsd_nstatic++;

		dp->gfsd_static = kmem_alloc(
		    dp->gfsd_nstatic * sizeof (gfs_dirent_t), KM_SLEEP);
		bcopy(entries, dp->gfsd_static,
		    dp->gfsd_nstatic * sizeof (gfs_dirent_t));
	}

	dp->gfsd_readdir = readdir_cb;
	dp->gfsd_lookup = lookup_cb;
	dp->gfsd_inode = inode_cb;

	mutex_init(&dp->gfsd_lock, NULL, MUTEX_DEFAULT, NULL);

	return (vp);
}

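/*
 * A directory with two static entries might be created as follows (a
 * sketch; the myfs names, constructors, and inode callback are
 * hypothetical):
 *
 *	static gfs_dirent_t myfs_entries[] = {
 *		{ "status", myfs_status_ctor, GFS_CACHE_VNODE },
 *		{ "ctl", myfs_ctl_ctor, 0 },
 *		{ NULL }
 *	};
 *
 *	vp = gfs_dir_create(sizeof (myfs_dir_t), pvp, myfs_dir_ops,
 *	    myfs_entries, myfs_inode_cb, MAXNAMELEN, NULL, NULL);
 *
 * Since all entries here are static, no readdir or lookup callback is
 * supplied, but the inode callback is required.
 */
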
/*
 * gfs_root_create(): create a root vnode for a GFS filesystem
 *
 * Similar to gfs_dir_create(), this creates a root vnode for a filesystem.  The
 * only difference is that it takes a vfs_t instead of a vnode_t as its parent.
 */
vnode_t *
gfs_root_create(size_t size, vfs_t *vfsp, vnodeops_t *ops, ino64_t ino,
    gfs_dirent_t *entries, gfs_inode_cb inode_cb, int maxlen,
    gfs_readdir_cb readdir_cb, gfs_lookup_cb lookup_cb)
{
	vnode_t *vp = gfs_dir_create(size, NULL, ops, entries, inode_cb,
	    maxlen, readdir_cb, lookup_cb);

	/* Manually set the inode */
	((gfs_file_t *)vp->v_data)->gfs_ino = ino;

	VFS_HOLD(vfsp);
	VN_SET_VFS_TYPE_DEV(vp, vfsp, VDIR, 0);
	vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT;

	return (vp);
}

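/*
 * This is typically called once from the filesystem's mount routine, e.g.
 * (a sketch; the myfs names and root inode number are hypothetical):
 *
 *	rootvp = gfs_root_create(sizeof (myfs_dir_t), vfsp, myfs_dir_ops,
 *	    MYFS_INO_ROOT, myfs_entries, myfs_inode_cb, MAXNAMELEN,
 *	    NULL, NULL);
 */
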
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine.  If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned.  Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(vnode_t *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vp->v_data;
	gfs_dir_t *dp = NULL;
	void *data;

	if (fp->gfs_parent == NULL)
		goto found;

	dp = fp->gfs_parent->v_data;

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++) {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * Not found among the static entries; it must be a dynamic entry.
	 * Clear 'ge' to indicate this.
	 */
	ge = NULL;

found:
	mutex_enter(&vp->v_lock);
	if (vp->v_count == 1) {
		/*
		 * Really remove this vnode
		 */
		data = vp->v_data;
		if (ge != NULL) {
			/*
			 * If this was a statically cached entry, simply set the
			 * cached vnode to NULL.
			 */
			ge->gfse_vnode = NULL;
		}
		mutex_exit(&vp->v_lock);

		/*
		 * Free vnode and release parent
		 */
		if (fp->gfs_parent) {
			gfs_dir_unlock(dp);
			VN_RELE(fp->gfs_parent);
		} else {
			ASSERT(vp->v_vfsp != NULL);
			VFS_RELE(vp->v_vfsp);
		}
		vn_free(vp);
	} else {
		vp->v_count--;
		data = NULL;
		mutex_exit(&vp->v_lock);
		if (dp)
			gfs_dir_unlock(dp);
	}

	return (data);
}

/*
 * gfs_dir_inactive()
 *
 * Same as above, but for directories.
 */
void *
gfs_dir_inactive(vnode_t *vp)
{
	gfs_dir_t *dp;

	ASSERT(vp->v_type == VDIR);

	if ((dp = gfs_file_inactive(vp)) != NULL) {
		mutex_destroy(&dp->gfsd_lock);
		if (dp->gfsd_nstatic)
			kmem_free(dp->gfsd_static,
			    dp->gfsd_nstatic * sizeof (gfs_dirent_t));
	}

	return (dp);
}

596  * gfs_dir_lookup()
597  *
598  * Looks up the given name in the directory and returns the corresponding vnode,
599  * if found.
600  *
601  * First, we search statically defined entries, if any.  If a match is found,
602  * and GFS_CACHE_VNODE is set and the vnode exists, we simply return the
603  * existing vnode.  Otherwise, we call the static entry's callback routine,
604  * caching the result if necessary.
605  *
606  * If no static entry is found, we invoke the lookup callback, if any.  The
607  * arguments to this callback are:
608  *
609  *	int gfs_lookup_cb(vnode_t *pvp, const char *nm, vnode_t **vpp);
 *	int gfs_lookup_cb(vnode_t *pvp, const char *nm, vnode_t **vpp,
 *	    ino64_t *inop);
 *
 *	pvp	- parent vnode
 *	nm	- name of entry
 *	vpp	- pointer to resulting vnode
 *	inop	- pointer in which to return the entry's inode number
 *
 * 	Returns 0 on success, non-zero on error.
 */
int
gfs_dir_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp)
{
	int i;
	gfs_dirent_t *ge;
	vnode_t *vp;
	gfs_dir_t *dp = dvp->v_data;
	int ret = 0;

	ASSERT(dvp->v_type == VDIR);

	if (gfs_lookup_dot(vpp, dvp, dp->gfsd_file.gfs_parent, nm) == 0)
		return (0);

	gfs_dir_lock(dp);

	/*
	 * Search static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++) {
		ge = &dp->gfsd_static[i];

		if (strcmp(ge->gfse_name, nm) == 0) {
			if (ge->gfse_vnode) {
				ASSERT(ge->gfse_flags & GFS_CACHE_VNODE);
				vp = ge->gfse_vnode;
				VN_HOLD(vp);
				goto out;
			}

			/*
			 * We drop the directory lock, as the constructor will
			 * need to do KM_SLEEP allocations.  If we return from
			 * the constructor only to find that a parallel
			 * operation has completed, and GFS_CACHE_VNODE is set
			 * for this entry, we discard the result in favor of the
			 * cached vnode.
			 */
			gfs_dir_unlock(dp);
			vp = ge->gfse_ctor(dvp);
			gfs_dir_lock(dp);

			((gfs_file_t *)vp->v_data)->gfs_index = i;

			/* Set the inode according to the callback. */
			((gfs_file_t *)vp->v_data)->gfs_ino =
			    dp->gfsd_inode(dvp, i);

			if (ge->gfse_flags & GFS_CACHE_VNODE) {
				if (ge->gfse_vnode == NULL) {
					ge->gfse_vnode = vp;
				} else {
					/*
					 * A parallel constructor beat us to it;
					 * return existing vnode.  We have to be
					 * careful because we can't release the
					 * current vnode while holding the
					 * directory lock; its inactive routine
					 * will try to lock this directory.
					 */
					vnode_t *oldvp = vp;
					vp = ge->gfse_vnode;
					VN_HOLD(vp);

					gfs_dir_unlock(dp);
					VN_RELE(oldvp);
					gfs_dir_lock(dp);
				}
			}

			goto out;
		}
	}

	/*
	 * See if there is a dynamic constructor.
	 */
	if (dp->gfsd_lookup) {
		ino64_t ino;
		gfs_file_t *fp;

		/*
		 * Once again, drop the directory lock, as the lookup routine
		 * will need to allocate memory, or otherwise deadlock on this
		 * directory.
		 */
		gfs_dir_unlock(dp);
		ret = dp->gfsd_lookup(dvp, nm, &vp, &ino);
		gfs_dir_lock(dp);
		if (ret != 0)
			goto out;

		fp = (gfs_file_t *)vp->v_data;
		fp->gfs_index = -1;
		fp->gfs_ino = ino;
	} else {
		/*
		 * No static entry found, and there is no lookup callback, so
		 * return ENOENT.
		 */
		ret = ENOENT;
	}

out:
	gfs_dir_unlock(dp);

	if (ret == 0)
		*vpp = vp;
	else
		*vpp = NULL;

	return (ret);
}

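/*
 * An example lookup callback for dynamic entries (a sketch; the myfs
 * helpers are hypothetical):
 *
 *	static int
 *	myfs_lookup_cb(vnode_t *dvp, const char *nm, vnode_t **vpp,
 *	    ino64_t *inop)
 *	{
 *		int id;
 *
 *		if ((id = myfs_id_by_name(dvp, nm)) < 0)
 *			return (ENOENT);
 *		*vpp = myfs_node_create(dvp, id);
 *		*inop = myfs_id_to_ino(id);
 *		return (0);
 *	}
 *
 * gfs_dir_lookup() then fills in gfs_index (-1 for dynamic entries) and
 * gfs_ino on the vnode returned through *vpp.
 */
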
/*
 * gfs_dir_readdir: does a readdir() on the given directory
 *
 *    dvp	- directory vnode
 *    uiop	- uio structure
 *    eofp	- eof pointer
 *    data	- arbitrary data passed to readdir callback
 *
 * This routine does all the readdir() dirty work.  Even so, the caller must
 * supply two callbacks in order to get full compatibility.
 *
 * If the directory contains static entries, an inode callback must be
 * specified.  This avoids having to create every vnode and call VOP_GETATTR()
 * when reading the directory.  This function has the following arguments:
 *
 *	ino64_t gfs_inode_cb(vnode_t *vp, int index);
 *
 * 	vp	- vnode for the directory
 * 	index	- index in original gfs_dirent_t array
 *
 * 	Returns the inode number for the given entry.
 *
 * For directories with dynamic entries, a readdir callback must be provided.
 * This is significantly more complex, thanks to the particulars of
 * VOP_READDIR().
 *
 *	int gfs_readdir_cb(vnode_t *vp, struct dirent64 *dp, int *eofp,
 *	    offset_t *off, offset_t *nextoff, void *data)
 *
 *	vp	- directory vnode
 *	dp	- directory entry, sized according to maxlen given to
 *		  gfs_dir_create().  callback must fill in d_name and
 *		  d_ino.
 *	eofp	- callback must set to 1 when EOF has been reached
 *	off	- on entry, the last offset read from the directory.  Callback
 *		  must set to the offset of the current entry, typically left
 *		  untouched.
 *	nextoff	- callback must set to offset of next entry.  Typically
 *		  (off + 1)
 *	data	- caller-supplied data
 *
 *	Return 0 on success, or error on failure.
 */
int
gfs_dir_readdir(vnode_t *dvp, uio_t *uiop, int *eofp, void *data)
{
	gfs_readdir_state_t gstate;
	int error, eof = 0;
	ino64_t ino, pino;
	offset_t off, next;
	gfs_dir_t *dp = dvp->v_data;

	ino = dp->gfsd_file.gfs_ino;

	if (dp->gfsd_file.gfs_parent == NULL)
		pino = ino;		/* root of filesystem */
	else
		pino = ((gfs_file_t *)
		    (dp->gfsd_file.gfs_parent->v_data))->gfs_ino;

	if ((error = gfs_readdir_init(&gstate, dp->gfsd_maxlen, 1, uiop,
	    pino, ino)) != 0)
		return (error);

	while ((error = gfs_readdir_pred(&gstate, uiop, &off)) == 0 &&
	    !eof) {

		if (off >= 0 && off < dp->gfsd_nstatic) {
			ino = dp->gfsd_inode(dvp, off);

			if ((error = gfs_readdir_emit(&gstate, uiop,
			    off, ino, dp->gfsd_static[off].gfse_name))
			    != 0)
				break;

		} else if (dp->gfsd_readdir) {
			off -= dp->gfsd_nstatic;

			if ((error = dp->gfsd_readdir(dvp,
			    gstate.grd_dirent, &eof, &off, &next,
			    data)) != 0 || eof)
				break;

			off += dp->gfsd_nstatic + 2;
			next += dp->gfsd_nstatic + 2;

			if ((error = gfs_readdir_emit_int(&gstate, uiop,
			    next)) != 0)
				break;
		} else {
			/*
			 * Offset is beyond the end of the static entries, and
			 * we have no dynamic entries.  Set EOF.
			 */
			eof = 1;
		}
	}

	return (gfs_readdir_fini(&gstate, error, eofp, eof));
}

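/*
 * An example readdir callback for dynamic entries (a sketch; the myfs
 * helpers are hypothetical).  The offsets seen by the callback are
 * relative to the dynamic portion of the directory; gfs_dir_readdir()
 * adjusts for ".", "..", and any static entries:
 *
 *	static int
 *	myfs_readdir_cb(vnode_t *vp, struct dirent64 *dp, int *eofp,
 *	    offset_t *offp, offset_t *nextp, void *data)
 *	{
 *		if (*offp >= myfs_nentries(vp)) {
 *			*eofp = 1;
 *			return (0);
 *		}
 *		dp->d_ino = myfs_ino_at(vp, *offp);
 *		(void) strcpy(dp->d_name, myfs_name_at(vp, *offp));
 *		*nextp = *offp + 1;
 *		return (0);
 *	}
 */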

/*
 * gfs_vop_lookup: VOP_LOOKUP() entry point
 *
 * For use directly in vnode ops table.  Given a GFS directory, calls
 * gfs_dir_lookup() as necessary.
 */
/* ARGSUSED */
int
gfs_vop_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr)
{
	return (gfs_dir_lookup(dvp, nm, vpp));
}

/*
 * gfs_vop_readdir: VOP_READDIR() entry point
 *
 * For use directly in vnode ops table.  Given a GFS directory, calls
 * gfs_dir_readdir() as necessary.
 */
/* ARGSUSED */
int
gfs_vop_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp)
{
	return (gfs_dir_readdir(vp, uiop, eofp, NULL));
}


/*
 * gfs_vop_map: VOP_MAP() entry point
 *
 * Convenient routine for handling pseudo-files that wish to allow mmap() calls.
 * This function only works for readonly files, and uses the read function for
 * the vnode to fill in the data.  The mapped data is immediately faulted in and
 * filled with the necessary data during this call; there are no getpage() or
 * putpage() routines.
 */
/* ARGSUSED */
int
gfs_vop_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cred)
{
	int rv;
	ssize_t resid = len;

	/*
	 * Check for bad parameters
	 */
#ifdef _ILP32
	if (len > MAXOFF_T)
		return (ENOMEM);
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOTSUP);
	if (off > MAXOFF_T)
		return (EFBIG);
	if ((long)off < 0 || (long)(off + len) < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (ENODEV);
	if ((prot & (PROT_EXEC | PROT_WRITE)) != 0)
		return (EACCES);

	/*
	 * Find appropriate address if needed, otherwise clear address range.
	 */
	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, (offset_t)off, 1, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		(void) as_unmap(as, *addrp, len);
	}

	/*
	 * Create mapping
	 */
	rv = as_map(as, *addrp, len, segvn_create, zfod_argsp);
	as_rangeunlock(as);
	if (rv != 0)
		return (rv);

	/*
	 * Fill with data from read()
	 */
	rv = vn_rdwr(UIO_READ, vp, *addrp, len, off, UIO_USERSPACE,
	    0, (rlim64_t)0, cred, &resid);

	if (rv == 0 && resid != 0)
		rv = ENXIO;

	if (rv != 0) {
		as_rangelock(as);
		(void) as_unmap(as, *addrp, len);
		as_rangeunlock(as);
	}

	return (rv);
}

/*
 * gfs_vop_inactive: VOP_INACTIVE() entry point
 *
 * Given a vnode that is a GFS file or directory, calls gfs_file_inactive()
 * or gfs_dir_inactive() as necessary, and kmem_free()s the associated
 * private data.
 */
/* ARGSUSED */
void
gfs_vop_inactive(vnode_t *vp, cred_t *cr)
{
	gfs_file_t *fp = vp->v_data;
	void *data;

	if (fp->gfs_type == GFS_DIR)
		data = gfs_dir_inactive(vp);
	else
		data = gfs_file_inactive(vp);

	if (data != NULL)
		kmem_free(data, fp->gfs_size);
}
957