xref: /freebsd/sys/kern/vfs_default.c (revision 9517e866259191fcd39434a97ad849a9b59b9b9f)
1 /*-
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed
6  * to Berkeley by John Heidemann of the UCLA Ficus project.
7  *
8  * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bio.h>
41 #include <sys/buf.h>
42 #include <sys/conf.h>
43 #include <sys/event.h>
44 #include <sys/kernel.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/lockf.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/mutex.h>
51 #include <sys/namei.h>
52 #include <sys/fcntl.h>
53 #include <sys/unistd.h>
54 #include <sys/vnode.h>
55 #include <sys/dirent.h>
56 #include <sys/poll.h>
57 
58 #include <security/mac/mac_framework.h>
59 
60 #include <vm/vm.h>
61 #include <vm/vm_object.h>
62 #include <vm/vm_extern.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_map.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_pager.h>
67 #include <vm/vnode_pager.h>
68 
69 static int	vop_nolookup(struct vop_lookup_args *);
70 static int	vop_nostrategy(struct vop_strategy_args *);
71 static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
72 				char *dirbuf, int dirbuflen, off_t *off,
73 				char **cpos, int *len, int *eofflag,
74 				struct thread *td);
75 static int	dirent_exists(struct vnode *vp, const char *dirname,
76 			      struct thread *td);
77 
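/*
 * Smallest d_reclen a well-formed directory entry can have: the fixed part
 * of struct dirent (everything before the d_name array) plus the minimum
 * four bytes of padded name storage.
 */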
78 #define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
79 
80 /*
81  * This vnode table stores what we want to do if the filesystem doesn't
82  * implement a particular VOP.
83  *
84  * If there is no specific entry here, we will return EOPNOTSUPP.
85  *
86  */
87 
88 struct vop_vector default_vnodeops = {
89 	.vop_default =		NULL,
90 	.vop_bypass =		VOP_EOPNOTSUPP,
91 
92 	.vop_accessx =		vop_stdaccessx,
93 	.vop_advlock =		vop_stdadvlock,
94 	.vop_advlockasync =	vop_stdadvlockasync,
95 	.vop_bmap =		vop_stdbmap,
96 	.vop_close =		VOP_NULL,
97 	.vop_fsync =		VOP_NULL,
98 	.vop_getpages =		vop_stdgetpages,
99 	.vop_getwritemount = 	vop_stdgetwritemount,
100 	.vop_inactive =		VOP_NULL,
101 	.vop_ioctl =		VOP_ENOTTY,
102 	.vop_kqfilter =		vop_stdkqfilter,
103 	.vop_islocked =		vop_stdislocked,
104 	.vop_lock1 =		vop_stdlock,
105 	.vop_lookup =		vop_nolookup,
106 	.vop_open =		VOP_NULL,
107 	.vop_pathconf =		VOP_EINVAL,
108 	.vop_poll =		vop_nopoll,
109 	.vop_putpages =		vop_stdputpages,
110 	.vop_readlink =		VOP_EINVAL,
111 	.vop_revoke =		VOP_PANIC,
112 	.vop_strategy =		vop_nostrategy,
113 	.vop_unlock =		vop_stdunlock,
114 	.vop_vptocnp =		vop_stdvptocnp,
115 	.vop_vptofh =		vop_stdvptofh,
116 };
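
/*
 * Illustrative sketch (not part of this file): a filesystem normally points
 * vop_default at default_vnodeops so that any VOP it does not implement
 * falls back to the entries above.  The "xxfs" names are hypothetical.
 *
 *	static struct vop_vector xxfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	xxfs_lookup,
 *		.vop_read =	xxfs_read,
 *		.vop_write =	xxfs_write,
 *		.vop_reclaim =	xxfs_reclaim,
 *	};
 */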
117 
118 /*
119  * Series of placeholder functions for various error returns for
120  * VOPs.
121  */
122 
123 int
124 vop_eopnotsupp(struct vop_generic_args *ap)
125 {
126 	/*
127 	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
128 	*/
129 
130 	return (EOPNOTSUPP);
131 }
132 
133 int
134 vop_ebadf(struct vop_generic_args *ap)
135 {
136 
137 	return (EBADF);
138 }
139 
140 int
141 vop_enotty(struct vop_generic_args *ap)
142 {
143 
144 	return (ENOTTY);
145 }
146 
147 int
148 vop_einval(struct vop_generic_args *ap)
149 {
150 
151 	return (EINVAL);
152 }
153 
154 int
155 vop_enoent(struct vop_generic_args *ap)
156 {
157 
158 	return (ENOENT);
159 }
160 
161 int
162 vop_null(struct vop_generic_args *ap)
163 {
164 
165 	return (0);
166 }
167 
168 /*
169  * Helper function to panic for VOPs that should never be called in a given filesystem.
170  */
171 int
172 vop_panic(struct vop_generic_args *ap)
173 {
174 
175 	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
176 }
177 
178 /*
179  * vop_std<something> and vop_no<something> are default functions for use by
180  * filesystems that need the "default reasonable" implementation for a
181  * particular operation.
182  *
183  * The documentation for the operations they implement, where it exists, is
184  * in the VOP_<SOMETHING>(9) manpage (all uppercase).
185  */
186 
187 /*
188  * Default vop for filesystems that do not support name lookup
189  */
190 static int
191 vop_nolookup(ap)
192 	struct vop_lookup_args /* {
193 		struct vnode *a_dvp;
194 		struct vnode **a_vpp;
195 		struct componentname *a_cnp;
196 	} */ *ap;
197 {
198 
199 	*ap->a_vpp = NULL;
200 	return (ENOTDIR);
201 }
202 
203 /*
204  *	vop_nostrategy:
205  *
206  *	Strategy routine for VFS devices that have none.
207  *
208  *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
209  *	routine.  Typically this is done for a BIO_READ strategy call.
210  *	Typically B_INVAL is assumed to already be clear prior to a write
211  *	and should not be cleared manually unless you just made the buffer
212  *	invalid.  BIO_ERROR should be cleared either way.
213  */
214 
215 static int
216 vop_nostrategy (struct vop_strategy_args *ap)
217 {
218 	printf("No strategy for buffer at %p\n", ap->a_bp);
219 	vprint("vnode", ap->a_vp);
220 	ap->a_bp->b_ioflags |= BIO_ERROR;
221 	ap->a_bp->b_error = EOPNOTSUPP;
222 	bufdone(ap->a_bp);
223 	return (EOPNOTSUPP);
224 }
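
/*
 * Illustrative sketch (hypothetical caller, not part of this file) of the
 * flag handling described above, for a synchronous read issued through a
 * vnode's strategy routine:
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_iooffset = dbtob(bp->b_blkno);
 *	bstrategy(bp);
 *	error = bufwait(bp);
 */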
225 
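/*
 * Return the next directory entry from vp through *dpp, refilling the
 * caller-supplied buffer with VOP_READDIR() whenever it runs dry and
 * rejecting records shorter than DIRENT_MINSIZE as malformed.
 */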
226 static int
227 get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
228 		int dirbuflen, off_t *off, char **cpos, int *len,
229 		int *eofflag, struct thread *td)
230 {
231 	int error, reclen;
232 	struct uio uio;
233 	struct iovec iov;
234 	struct dirent *dp;
235 
236 	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
237 	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));
238 
239 	if (*len == 0) {
240 		iov.iov_base = dirbuf;
241 		iov.iov_len = dirbuflen;
242 
243 		uio.uio_iov = &iov;
244 		uio.uio_iovcnt = 1;
245 		uio.uio_offset = *off;
246 		uio.uio_resid = dirbuflen;
247 		uio.uio_segflg = UIO_SYSSPACE;
248 		uio.uio_rw = UIO_READ;
249 		uio.uio_td = td;
250 
251 		*eofflag = 0;
252 
253 #ifdef MAC
254 		error = mac_vnode_check_readdir(td->td_ucred, vp);
255 		if (error == 0)
256 #endif
257 			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
258 		    		NULL, NULL);
259 		if (error)
260 			return (error);
261 
262 		*off = uio.uio_offset;
263 
264 		*cpos = dirbuf;
265 		*len = (dirbuflen - uio.uio_resid);
266 	}
267 
268 	dp = (struct dirent *)(*cpos);
269 	reclen = dp->d_reclen;
270 	*dpp = dp;
271 
272 	/* Check for a malformed directory. */
273 	if (reclen < DIRENT_MINSIZE)
274 		return (EINVAL);
275 
276 	*cpos += reclen;
277 	*len -= reclen;
278 
279 	return (0);
280 }
281 
282 /*
283  * Check if a named file exists in a given directory vnode.
284  */
285 static int
286 dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
287 {
288 	char *dirbuf, *cpos;
289 	int error, eofflag, dirbuflen, len, found;
290 	off_t off;
291 	struct dirent *dp;
292 	struct vattr va;
293 
294 	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
295 	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));
296 
297 	found = 0;
298 
299 	error = VOP_GETATTR(vp, &va, td->td_ucred);
300 	if (error)
301 		return (found);
302 
303 	dirbuflen = DEV_BSIZE;
304 	if (dirbuflen < va.va_blocksize)
305 		dirbuflen = va.va_blocksize;
306 	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);
307 
308 	off = 0;
309 	len = 0;
310 	do {
311 		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
312 					&cpos, &len, &eofflag, td);
313 		if (error)
314 			goto out;
315 
316 		if ((dp->d_type != DT_WHT) &&
317 		    !strcmp(dp->d_name, dirname)) {
318 			found = 1;
319 			goto out;
320 		}
321 	} while (len > 0 || !eofflag);
322 
323 out:
324 	free(dirbuf, M_TEMP);
325 	return (found);
326 }
327 
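/*
 * Default accessx: reduce the extended access request to the classic
 * VOP_ACCESS() bits via vfs_unixify_accmode() and pass it on; a request
 * that reduces to nothing succeeds trivially.
 */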
328 int
329 vop_stdaccessx(struct vop_accessx_args *ap)
330 {
331 	int error;
332 	accmode_t accmode = ap->a_accmode;
333 
334 	error = vfs_unixify_accmode(&accmode);
335 	if (error != 0)
336 		return (error);
337 
338 	if (accmode == 0)
339 		return (0);
340 
341 	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
342 }
343 
344 /*
345  * Advisory record locking support
346  */
347 int
348 vop_stdadvlock(struct vop_advlock_args *ap)
349 {
350 	struct vnode *vp;
351 	struct ucred *cred;
352 	struct vattr vattr;
353 	int error;
354 
355 	vp = ap->a_vp;
356 	cred = curthread->td_ucred;
357 	vn_lock(vp, LK_SHARED | LK_RETRY);
358 	error = VOP_GETATTR(vp, &vattr, cred);
359 	VOP_UNLOCK(vp, 0);
360 	if (error)
361 		return (error);
362 
363 	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
364 }
365 
366 int
367 vop_stdadvlockasync(struct vop_advlockasync_args *ap)
368 {
369 	struct vnode *vp;
370 	struct ucred *cred;
371 	struct vattr vattr;
372 	int error;
373 
374 	vp = ap->a_vp;
375 	cred = curthread->td_ucred;
376 	vn_lock(vp, LK_SHARED | LK_RETRY);
377 	error = VOP_GETATTR(vp, &vattr, cred);
378 	VOP_UNLOCK(vp, 0);
379 	if (error)
380 		return (error);
381 
382 	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
383 }
384 
385 /*
386  * vop_stdpathconf:
387  *
388  * Standard implementation of POSIX pathconf, to get information about limits
389  * for a filesystem.
390  * Override per filesystem for the case where the filesystem has smaller
391  * limits.
392  */
393 int
394 vop_stdpathconf(ap)
395 	struct vop_pathconf_args /* {
396 	struct vnode *a_vp;
397 	int a_name;
398 	int *a_retval;
399 	} */ *ap;
400 {
401 
402 	switch (ap->a_name) {
403 		case _PC_NAME_MAX:
404 			*ap->a_retval = NAME_MAX;
405 			return (0);
406 		case _PC_PATH_MAX:
407 			*ap->a_retval = PATH_MAX;
408 			return (0);
409 		case _PC_LINK_MAX:
410 			*ap->a_retval = LINK_MAX;
411 			return (0);
412 		case _PC_MAX_CANON:
413 			*ap->a_retval = MAX_CANON;
414 			return (0);
415 		case _PC_MAX_INPUT:
416 			*ap->a_retval = MAX_INPUT;
417 			return (0);
418 		case _PC_PIPE_BUF:
419 			*ap->a_retval = PIPE_BUF;
420 			return (0);
421 		case _PC_CHOWN_RESTRICTED:
422 			*ap->a_retval = 1;
423 			return (0);
424 		case _PC_VDISABLE:
425 			*ap->a_retval = _POSIX_VDISABLE;
426 			return (0);
427 		default:
428 			return (EINVAL);
429 	}
430 	/* NOTREACHED */
431 }
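
/*
 * Illustrative sketch (hypothetical "xxfs", not part of this file): a
 * filesystem with its own limit for one name can handle that case itself
 * and fall back to the standard values above for everything else.
 *
 *	static int
 *	xxfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = XXFS_MAXNAMLEN;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */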
432 
433 /*
434  * Standard lock, unlock and islocked functions.
435  */
436 int
437 vop_stdlock(ap)
438 	struct vop_lock1_args /* {
439 		struct vnode *a_vp;
440 		int a_flags;
441 		char *file;
442 		int line;
443 	} */ *ap;
444 {
445 	struct vnode *vp = ap->a_vp;
446 
447 	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
448 	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
449 	    ap->a_line));
450 }
451 
452 /* See above. */
453 int
454 vop_stdunlock(ap)
455 	struct vop_unlock_args /* {
456 		struct vnode *a_vp;
457 		int a_flags;
458 	} */ *ap;
459 {
460 	struct vnode *vp = ap->a_vp;
461 
462 	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
463 }
464 
465 /* See above. */
466 int
467 vop_stdislocked(ap)
468 	struct vop_islocked_args /* {
469 		struct vnode *a_vp;
470 	} */ *ap;
471 {
472 
473 	return (lockstatus(ap->a_vp->v_vnlock));
474 }
475 
476 /*
477  * Poll handler for filesystems that do not support polling; see poll_no_poll().
478  */
479 int
480 vop_nopoll(ap)
481 	struct vop_poll_args /* {
482 		struct vnode *a_vp;
483 		int  a_events;
484 		struct ucred *a_cred;
485 		struct thread *a_td;
486 	} */ *ap;
487 {
488 
489 	return (poll_no_poll(ap->a_events));
490 }
491 
492 /*
493  * Implement poll for local filesystems that support it.
494  */
495 int
496 vop_stdpoll(ap)
497 	struct vop_poll_args /* {
498 		struct vnode *a_vp;
499 		int  a_events;
500 		struct ucred *a_cred;
501 		struct thread *a_td;
502 	} */ *ap;
503 {
504 	if (ap->a_events & ~POLLSTANDARD)
505 		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
506 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
507 }
508 
509 /*
510  * Return our mount point, as we will take charge of the writes.
511  */
512 int
513 vop_stdgetwritemount(ap)
514 	struct vop_getwritemount_args /* {
515 		struct vnode *a_vp;
516 		struct mount **a_mpp;
517 	} */ *ap;
518 {
519 	struct mount *mp;
520 
521 	/*
522 	 * XXX Since this is called unlocked we may be recycled while
523 	 * attempting to ref the mount.  If this is the case our mountpoint
524 	 * will be set to NULL.  We only have to prevent this call from
525 	 * returning with a ref to an incorrect mountpoint.  It is not
526 	 * harmful to return with a ref to our previous mountpoint.
527 	 */
528 	mp = ap->a_vp->v_mount;
529 	if (mp != NULL) {
530 		vfs_ref(mp);
531 		if (mp != ap->a_vp->v_mount) {
532 			vfs_rel(mp);
533 			mp = NULL;
534 		}
535 	}
536 	*(ap->a_mpp) = mp;
537 	return (0);
538 }
539 
540 /* Map a logical file block to a DEV_BSIZE block in the vnode's bufobj; see VOP_BMAP(9). */
541 int
542 vop_stdbmap(ap)
543 	struct vop_bmap_args /* {
544 		struct vnode *a_vp;
545 		daddr_t  a_bn;
546 		struct bufobj **a_bop;
547 		daddr_t *a_bnp;
548 		int *a_runp;
549 		int *a_runb;
550 	} */ *ap;
551 {
552 
553 	if (ap->a_bop != NULL)
554 		*ap->a_bop = &ap->a_vp->v_bufobj;
555 	if (ap->a_bnp != NULL)
556 		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
557 	if (ap->a_runp != NULL)
558 		*ap->a_runp = 0;
559 	if (ap->a_runb != NULL)
560 		*ap->a_runb = 0;
561 	return (0);
562 }
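
/*
 * Worked example (values assumed for illustration): with f_iosize of 16384
 * bytes and DEV_BSIZE of 512, btodb(16384) is 32, so logical block 10 maps
 * to DEV_BSIZE-sized block 320, with no read-ahead or read-behind reported.
 */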
563 
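/*
 * Default fsync for filesystems that keep their data in the buffer cache:
 * write out every dirty buffer attached to the vnode, optionally waiting
 * for the I/O and retrying for as long as progress is being made.
 */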
564 int
565 vop_stdfsync(ap)
566 	struct vop_fsync_args /* {
567 		struct vnode *a_vp;
568 		struct ucred *a_cred;
569 		int a_waitfor;
570 		struct thread *a_td;
571 	} */ *ap;
572 {
573 	struct vnode *vp = ap->a_vp;
574 	struct buf *bp;
575 	struct bufobj *bo;
576 	struct buf *nbp;
577 	int error = 0;
578 	int maxretry = 1000;     /* large, arbitrarily chosen */
579 
580 	bo = &vp->v_bufobj;
581 	BO_LOCK(bo);
582 loop1:
583 	/*
584 	 * MARK/SCAN initialization to avoid infinite loops.
585 	 */
586 	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
587 		bp->b_vflags &= ~BV_SCANNED;
588 		bp->b_error = 0;
589 	}
590 
591 	/*
592 	 * Flush all dirty buffers associated with a vnode.
593 	 */
594 loop2:
595 	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
596 		if ((bp->b_vflags & BV_SCANNED) != 0)
597 			continue;
598 		bp->b_vflags |= BV_SCANNED;
599 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
600 			continue;
601 		BO_UNLOCK(bo);
602 		KASSERT(bp->b_bufobj == bo,
603 		    ("bp %p wrong b_bufobj %p should be %p",
604 		    bp, bp->b_bufobj, bo));
605 		if ((bp->b_flags & B_DELWRI) == 0)
606 			panic("fsync: not dirty");
607 		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
608 			vfs_bio_awrite(bp);
609 		} else {
610 			bremfree(bp);
611 			bawrite(bp);
612 		}
613 		BO_LOCK(bo);
614 		goto loop2;
615 	}
616 
617 	/*
618 	 * If synchronous the caller expects us to completely resolve all
619 	 * dirty buffers in the system.  Wait for in-progress I/O to
620 	 * complete (which could include background bitmap writes), then
621 	 * retry if dirty blocks still exist.
622 	 */
623 	if (ap->a_waitfor == MNT_WAIT) {
624 		bufobj_wwait(bo, 0, 0);
625 		if (bo->bo_dirty.bv_cnt > 0) {
626 			/*
627 			 * If we are unable to write any of these buffers
628 			 * then we fail now rather than trying endlessly
629 			 * to write them out.
630 			 */
631 			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
632 				if ((error = bp->b_error) == 0)
633 					continue;
634 			if (error == 0 && --maxretry >= 0)
635 				goto loop1;
636 			error = EAGAIN;
637 		}
638 	}
639 	BO_UNLOCK(bo);
640 	if (error == EAGAIN)
641 		vprint("fsync: giving up on dirty", vp);
642 
643 	return (error);
644 }
645 
646 /* Default getpages: let the generic vnode pager do the work; see VOP_GETPAGES(9). */
647 int
648 vop_stdgetpages(ap)
649 	struct vop_getpages_args /* {
650 		struct vnode *a_vp;
651 		vm_page_t *a_m;
652 		int a_count;
653 		int a_reqpage;
654 		vm_ooffset_t a_offset;
655 	} */ *ap;
656 {
657 
658 	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
659 	    ap->a_count, ap->a_reqpage);
660 }
661 
662 int
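/*
 * Default kqfilter: hand the request to the generic VFS knote filters.
 */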
663 vop_stdkqfilter(struct vop_kqfilter_args *ap)
664 {
665 	return vfs_kqfilter(ap);
666 }
667 
668 /* Default putpages: let the generic vnode pager do the work; see VOP_PUTPAGES(9). */
669 int
670 vop_stdputpages(ap)
671 	struct vop_putpages_args /* {
672 		struct vnode *a_vp;
673 		vm_page_t *a_m;
674 		int a_count;
675 		int a_sync;
676 		int *a_rtvals;
677 		vm_ooffset_t a_offset;
678 	} */ *ap;
679 {
680 
681 	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
682 	     ap->a_sync, ap->a_rtvals);
683 }
684 
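/*
 * File handle generation is filesystem specific, so there is no useful
 * default and we report EOPNOTSUPP.
 */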
685 int
686 vop_stdvptofh(struct vop_vptofh_args *ap)
687 {
688 	return (EOPNOTSUPP);
689 }
690 
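/*
 * Default vptocnp: recover the name of vp within its parent directory by
 * opening ".." and scanning it with VOP_READDIR() for an entry whose
 * d_fileno matches vp, copying the name to the end of the caller's buffer.
 */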
691 int
692 vop_stdvptocnp(struct vop_vptocnp_args *ap)
693 {
694 	struct vnode *vp = ap->a_vp;
695 	struct vnode **dvp = ap->a_vpp;
696 	struct ucred *cred = ap->a_cred;
697 	char *buf = ap->a_buf;
698 	int *buflen = ap->a_buflen;
699 	char *dirbuf, *cpos;
700 	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
701 	off_t off;
702 	ino_t fileno;
703 	struct vattr va;
704 	struct nameidata nd;
705 	struct thread *td;
706 	struct dirent *dp;
707 	struct vnode *mvp;
708 
709 	i = *buflen;
710 	error = 0;
711 	covered = 0;
712 	td = curthread;
713 
714 	if (vp->v_type != VDIR)
715 		return (ENOENT);
716 
717 	error = VOP_GETATTR(vp, &va, cred);
718 	if (error)
719 		return (error);
720 
721 	VREF(vp);
722 	locked = VOP_ISLOCKED(vp);
723 	VOP_UNLOCK(vp, 0);
724 	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
725 	    "..", vp, td);
726 	flags = FREAD;
727 	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
728 	if (error) {
729 		vn_lock(vp, locked | LK_RETRY);
730 		return (error);
731 	}
732 	NDFREE(&nd, NDF_ONLY_PNBUF);
733 
734 	mvp = *dvp = nd.ni_vp;
735 
736 	if (vp->v_mount != (*dvp)->v_mount &&
737 	    ((*dvp)->v_vflag & VV_ROOT) &&
738 	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
739 		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
740 		VREF(mvp);
741 		VOP_UNLOCK(mvp, 0);
742 		vn_close(mvp, FREAD, cred, td);
743 		VREF(*dvp);
744 		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
745 		covered = 1;
746 	}
747 
748 	fileno = va.va_fileid;
749 
750 	dirbuflen = DEV_BSIZE;
751 	if (dirbuflen < va.va_blocksize)
752 		dirbuflen = va.va_blocksize;
753 	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);
754 
755 	if ((*dvp)->v_type != VDIR) {
756 		error = ENOENT;
757 		goto out;
758 	}
759 
760 	off = 0;
761 	len = 0;
762 	do {
763 		/* call VOP_READDIR of parent */
764 		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
765 					&cpos, &len, &eofflag, td);
766 		if (error)
767 			goto out;
768 
769 		if ((dp->d_type != DT_WHT) &&
770 		    (dp->d_fileno == fileno)) {
771 			if (covered) {
772 				VOP_UNLOCK(*dvp, 0);
773 				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
774 				if (dirent_exists(mvp, dp->d_name, td)) {
775 					error = ENOENT;
776 					VOP_UNLOCK(mvp, 0);
777 					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
778 					goto out;
779 				}
780 				VOP_UNLOCK(mvp, 0);
781 				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
782 			}
783 			i -= dp->d_namlen;
784 
785 			if (i < 0) {
786 				error = ENOMEM;
787 				goto out;
788 			}
789 			bcopy(dp->d_name, buf + i, dp->d_namlen);
790 			error = 0;
791 			goto out;
792 		}
793 	} while (len > 0 || !eofflag);
794 	error = ENOENT;
795 
796 out:
797 	free(dirbuf, M_TEMP);
798 	if (!error) {
799 		*buflen = i;
800 		vhold(*dvp);
801 	}
802 	if (covered) {
803 		vput(*dvp);
804 		vrele(mvp);
805 	} else {
806 		VOP_UNLOCK(mvp, 0);
807 		vn_close(mvp, FREAD, cred, td);
808 	}
809 	vn_lock(vp, locked | LK_RETRY);
810 	return (error);
811 }
812 
813 /*
814  * VFS default ops,
815  * used to fill the VFS operations table with reasonable default return values.
816  */
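
/*
 * Illustrative sketch (hypothetical "xxfs", not part of this file): a
 * filesystem's vfsops table can point operations it does not care about at
 * these defaults and implement only the rest itself.
 *
 *	static struct vfsops xxfs_vfsops = {
 *		.vfs_mount =	xxfs_mount,
 *		.vfs_unmount =	xxfs_unmount,
 *		.vfs_root =	xxfs_root,
 *		.vfs_statfs =	xxfs_statfs,
 *		.vfs_sync =	vfs_stdsync,
 *		.vfs_vget =	vfs_stdvget,
 *		.vfs_init =	vfs_stdinit,
 *		.vfs_uninit =	vfs_stduninit,
 *	};
 */
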
817 int
818 vfs_stdroot (mp, flags, vpp)
819 	struct mount *mp;
820 	int flags;
821 	struct vnode **vpp;
822 {
823 
824 	return (EOPNOTSUPP);
825 }
826 
827 int
828 vfs_stdstatfs (mp, sbp)
829 	struct mount *mp;
830 	struct statfs *sbp;
831 {
832 
833 	return (EOPNOTSUPP);
834 }
835 
836 int
837 vfs_stdquotactl (mp, cmds, uid, arg)
838 	struct mount *mp;
839 	int cmds;
840 	uid_t uid;
841 	void *arg;
842 {
843 
844 	return (EOPNOTSUPP);
845 }
846 
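/*
 * Default VFS sync: walk the mount's vnode list and VOP_FSYNC() every
 * vnode that still has dirty buffers, reporting any error encountered.
 */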
847 int
848 vfs_stdsync(mp, waitfor)
849 	struct mount *mp;
850 	int waitfor;
851 {
852 	struct vnode *vp, *mvp;
853 	struct thread *td;
854 	int error, lockreq, allerror = 0;
855 
856 	td = curthread;
857 	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
858 	if (waitfor != MNT_WAIT)
859 		lockreq |= LK_NOWAIT;
860 	/*
861 	 * Force stale buffer cache information to be flushed.
862 	 */
863 	MNT_ILOCK(mp);
864 loop:
865 	MNT_VNODE_FOREACH(vp, mp, mvp) {
866 		/* bv_cnt is an acceptable race here. */
867 		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
868 			continue;
869 		VI_LOCK(vp);
870 		MNT_IUNLOCK(mp);
871 		if ((error = vget(vp, lockreq, td)) != 0) {
872 			MNT_ILOCK(mp);
873 			if (error == ENOENT) {
874 				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
875 				goto loop;
876 			}
877 			continue;
878 		}
879 		error = VOP_FSYNC(vp, waitfor, td);
880 		if (error)
881 			allerror = error;
882 
883 		/* Do not turn this into vput.  td is not always curthread. */
884 		VOP_UNLOCK(vp, 0);
885 		vrele(vp);
886 		MNT_ILOCK(mp);
887 	}
888 	MNT_IUNLOCK(mp);
889 	return (allerror);
890 }
891 
892 int
893 vfs_stdnosync (mp, waitfor)
894 	struct mount *mp;
895 	int waitfor;
896 {
897 
898 	return (0);
899 }
900 
901 int
902 vfs_stdvget (mp, ino, flags, vpp)
903 	struct mount *mp;
904 	ino_t ino;
905 	int flags;
906 	struct vnode **vpp;
907 {
908 
909 	return (EOPNOTSUPP);
910 }
911 
912 int
913 vfs_stdfhtovp (mp, fhp, vpp)
914 	struct mount *mp;
915 	struct fid *fhp;
916 	struct vnode **vpp;
917 {
918 
919 	return (EOPNOTSUPP);
920 }
921 
922 int
923 vfs_stdinit (vfsp)
924 	struct vfsconf *vfsp;
925 {
926 
927 	return (0);
928 }
929 
930 int
931 vfs_stduninit (vfsp)
932 	struct vfsconf *vfsp;
933 {
934 
935 	return(0);
936 }
937 
938 int
939 vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
940 	struct mount *mp;
941 	int cmd;
942 	struct vnode *filename_vp;
943 	int attrnamespace;
944 	const char *attrname;
945 {
946 
947 	if (filename_vp != NULL)
948 		VOP_UNLOCK(filename_vp, 0);
949 	return (EOPNOTSUPP);
950 }
951 
952 int
953 vfs_stdsysctl(mp, op, req)
954 	struct mount *mp;
955 	fsctlop_t op;
956 	struct sysctl_req *req;
957 {
958 
959 	return (EOPNOTSUPP);
960 }
961 
962 /* end of vfs default ops */
963