xref: /freebsd/sys/kern/vfs_default.c (revision 195ebc7e9e4b129de810833791a19dfb4349d6a9)
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

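/* Smallest admissible dirent: the fixed header plus 4 bytes of name storage. */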
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_accessx =		vop_stdaccessx,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_islocked =		vop_stdislocked,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
};
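
/*
 * Illustrative sketch (hypothetical "myfs", not part of this file): a
 * filesystem chains to these defaults by pointing .vop_default at
 * default_vnodeops and overriding only the operations it implements.
 * Any VOP it leaves unset then falls through to the entry above, or to
 * the .vop_bypass handler when no entry exists:
 *
 *	struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_readdir =	myfs_readdir,
 *	};
 */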

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, is in
 * the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
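
/*
 * Illustrative sketch (not part of this file): per the comment above, a
 * caller issuing a read through the strategy entry point is expected to
 * clear the error and invalid flags first, roughly:
 *
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_iocmd = BIO_READ;
 *	BO_STRATEGY(&vp->v_bufobj, bp);
 */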

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory entry. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap)
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
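
/*
 * Illustrative sketch (hypothetical "myfs" and MYFS_NAME_MAX, not part of
 * this file): a filesystem with tighter limits typically handles only the
 * names it constrains and falls back to vop_stdpathconf for the rest:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = MYFS_NAME_MAX;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */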

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap)
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap)
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap)
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Default poll: report the standard events as ready.  poll_no_poll()
 * returns POLLNVAL for any request beyond POLLSTANDARD, so callers can
 * detect reliably that extended poll functionality is absent.
 */
int
vop_nopoll(struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap)
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap)
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap)
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap)
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
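
/*
 * Worked example for the mapping above (illustrative): a_bn counts
 * filesystem blocks of mnt_stat.f_iosize bytes, while *a_bnp is in
 * DEV_BSIZE (512-byte) units.  With f_iosize = 16384, btodb(16384) = 32,
 * so logical block 5 maps to device block 5 * 32 = 160.  The zero run
 * lengths advertise no read-ahead/read-behind clustering.
 */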

int
vop_stdfsync(struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap)
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap)
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap)
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

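/*
 * Default vptocnp: find the name of the directory vnode vp by scanning
 * its parent ("..") for an entry whose file number matches vp's, and
 * copy that name to the tail of the caller-supplied buffer.  Returns
 * ENOENT if no matching entry is found.
 */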
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open(&nd, &flags, 0, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, td->td_ucred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			bcopy(dp->d_name, buf + i, dp->d_namlen);
			error = 0;
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vhold(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, td->td_ucred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
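
/*
 * Illustrative sketch (hypothetical "myfs", not part of this file): a
 * filesystem wires these defaults into its struct vfsops and overrides
 * only what it implements:
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_unmount =	myfs_unmount,
 *		.vfs_root =	vfs_stdroot,
 *		.vfs_statfs =	vfs_stdstatfs,
 *		.vfs_sync =	vfs_stdnosync,
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */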
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		/* bv_cnt is an acceptable race here. */
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
			continue;
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */