xref: /freebsd/sys/fs/unionfs/union_vnops.c (revision a14a0223ae1b172e96dd2a1d849e22026a98b692)
1 /*
2  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3  * Copyright (c) 1992, 1993, 1994, 1995
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
38  * $FreeBSD$
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/proc.h>
44 #include <sys/fcntl.h>
45 #include <sys/stat.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <sys/mount.h>
49 #include <sys/namei.h>
50 #include <sys/malloc.h>
51 #include <sys/buf.h>
52 #include <sys/lock.h>
53 #include <sys/sysctl.h>
54 #include <miscfs/union/union.h>
55 
56 #include <vm/vm.h>
57 #include <vm/vnode_pager.h>
58 
59 #include <vm/vm_page.h>
60 #include <vm/vm_object.h>
61 #include <vm/vm_pager.h>
62 #include <vm/vm_extern.h>
63 
64 int uniondebug = 0;
65 
66 #if UDEBUG_ENABLED
67 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
68 #else
69 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
70 #endif
71 
72 static int	union_abortop __P((struct vop_abortop_args *ap));
73 static int	union_access __P((struct vop_access_args *ap));
74 static int	union_advlock __P((struct vop_advlock_args *ap));
75 static int	union_bmap __P((struct vop_bmap_args *ap));
76 static int	union_close __P((struct vop_close_args *ap));
77 static int	union_create __P((struct vop_create_args *ap));
78 static int	union_fsync __P((struct vop_fsync_args *ap));
79 static int	union_getattr __P((struct vop_getattr_args *ap));
80 static int	union_inactive __P((struct vop_inactive_args *ap));
81 static int	union_ioctl __P((struct vop_ioctl_args *ap));
82 static int	union_lease __P((struct vop_lease_args *ap));
83 static int	union_link __P((struct vop_link_args *ap));
84 static int	union_lock __P((struct vop_lock_args *ap));
85 static int	union_lookup __P((struct vop_lookup_args *ap));
86 static int	union_lookup1 __P((struct vnode *udvp, struct vnode **dvp,
87 				   struct vnode **vpp,
88 				   struct componentname *cnp));
89 static int	union_mkdir __P((struct vop_mkdir_args *ap));
90 static int	union_mknod __P((struct vop_mknod_args *ap));
91 static int	union_mmap __P((struct vop_mmap_args *ap));
92 static int	union_open __P((struct vop_open_args *ap));
93 static int	union_pathconf __P((struct vop_pathconf_args *ap));
94 static int	union_print __P((struct vop_print_args *ap));
95 static int	union_read __P((struct vop_read_args *ap));
96 static int	union_readdir __P((struct vop_readdir_args *ap));
97 static int	union_readlink __P((struct vop_readlink_args *ap));
98 static int	union_reclaim __P((struct vop_reclaim_args *ap));
99 static int	union_remove __P((struct vop_remove_args *ap));
100 static int	union_rename __P((struct vop_rename_args *ap));
101 static int	union_revoke __P((struct vop_revoke_args *ap));
102 static int	union_rmdir __P((struct vop_rmdir_args *ap));
103 static int	union_poll __P((struct vop_poll_args *ap));
104 static int	union_setattr __P((struct vop_setattr_args *ap));
105 static int	union_strategy __P((struct vop_strategy_args *ap));
106 static int	union_getpages __P((struct vop_getpages_args *ap));
107 static int	union_putpages __P((struct vop_putpages_args *ap));
108 static int	union_symlink __P((struct vop_symlink_args *ap));
109 static int	union_unlock __P((struct vop_unlock_args *ap));
110 static int	union_whiteout __P((struct vop_whiteout_args *ap));
111 static int	union_write __P((struct vop_read_args *ap));
112 
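/*
 * Locking helpers for the upper and "other" (upper if it exists,
 * otherwise lower) vnodes of a union node.  The lock routines return
 * the vnode referenced and exclusively locked, or NULL if it does not
 * exist, and must be paired with the matching unlock routine, which
 * vput()s the vnode (dropping both the lock and the reference).
 */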
113 static __inline
114 struct vnode *
115 union_lock_upper(struct union_node *un, struct proc *p)
116 {
117 	struct vnode *uppervp;
118 
119 	if ((uppervp = un->un_uppervp) != NULL) {
120 		VREF(uppervp);
121 		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
122 	}
123 	KASSERT((uppervp == NULL || uppervp->v_usecount > 0), ("uppervp usecount is 0"));
124 	return(uppervp);
125 }
126 
127 static __inline
128 void
129 union_unlock_upper(struct vnode *uppervp, struct proc *p)
130 {
131 	vput(uppervp);
132 }
133 
134 static __inline
135 struct vnode *
136 union_lock_other(struct union_node *un, struct proc *p)
137 {
138 	struct vnode *vp;
139 
140 	if (un->un_uppervp != NULL) {
141 		vp = union_lock_upper(un, p);
142 	} else if ((vp = un->un_lowervp) != NULL) {
143 		VREF(vp);
144 		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
145 	}
146 	return(vp);
147 }
148 
149 static __inline
150 void
151 union_unlock_other(struct vnode *vp, struct proc *p)
152 {
153 	vput(vp);
154 }
155 
156 /*
157  *	union_lookup:
158  *
159  *	udvp	must be exclusively locked on call and will remain
160  *		exclusively locked on return.  This is the mount point
161  *		for our filesystem.
162  *
163  *	dvp	Our base directory, locked and referenced.
164  *		The passed dvp will be dereferenced and unlocked on return
165  *		and a new dvp will be returned which is locked and
166  *		referenced in the same variable.
167  *
168  *	vpp	is filled in with the result if no error occurred,
169  *		locked and ref'd.
170  *
171  *		If an error is returned, *vpp is set to NULLVP.  If no
172  *		error occurs, *vpp is returned with a reference and an
173  *		exclusive lock.
174  */
175 
176 static int
177 union_lookup1(udvp, pdvp, vpp, cnp)
178 	struct vnode *udvp;
179 	struct vnode **pdvp;
180 	struct vnode **vpp;
181 	struct componentname *cnp;
182 {
183 	int error;
184 	struct proc *p = cnp->cn_proc;
185 	struct vnode *dvp = *pdvp;
186 	struct vnode *tdvp;
187 	struct mount *mp;
188 
189 	/*
190 	 * If stepping up the directory tree, check for going
191 	 * back across the mount point, in which case do what
192 	 * lookup would do by stepping back down the mount
193 	 * hierarchy.
194 	 */
195 	if (cnp->cn_flags & ISDOTDOT) {
196 		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
197 			/*
198 			 * Don't do the NOCROSSMOUNT check
199 			 * at this level.  By definition,
200 			 * union fs deals with namespaces, not
201 			 * filesystems.
202 			 */
203 			tdvp = dvp;
204 			dvp = dvp->v_mount->mnt_vnodecovered;
205 			VREF(dvp);
206 			vput(tdvp);
207 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
208 		}
209 	}
210 
211 	/*
212 	 * Set the return dvp to be the upperdvp 'parent directory'.
213 	 */
214 	*pdvp = dvp;
215 
216 	/*
217 	 * If the VOP_LOOKUP call generates an error, tdvp is invalid and no
218 	 * changes will have been made to dvp, so we are set to return.
219 	 */
220 
221 	error = VOP_LOOKUP(dvp, &tdvp, cnp);
222 	if (error) {
223 		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
224 		*vpp = NULL;
225 		return (error);
226 	}
227 
228 	/*
229 	 * The parent directory will have been unlocked, unless lookup
230 	 * found the last component or if dvp == tdvp (tdvp must be locked).
231 	 *
232 	 * We want our dvp to remain locked and ref'd.  We also want tdvp
233 	 * to remain locked and ref'd.
234 	 */
235 	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));
236 
237 	if (dvp != tdvp && (cnp->cn_flags & ISLASTCN) == 0)
238 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
239 
240 	/*
241 	 * Lastly check if the current node is a mount point in
242 	 * which case walk up the mount hierarchy making sure not to
243 	 * bump into the root of the mount tree (i.e. dvp != udvp).
244 	 *
245 	 * We use dvp as a temporary variable here; it is no longer related
246 	 * to the dvp above.  However, we have to ensure that both *pdvp and
247 	 * tdvp are locked on return.
248 	 */
249 
250 	dvp = tdvp;
251 	while (
252 	    dvp != udvp &&
253 	    (dvp->v_type == VDIR) &&
254 	    (mp = dvp->v_mountedhere)
255 	) {
256 		int relock_pdvp = 0;
257 
258 		if (vfs_busy(mp, 0, 0, p))
259 			continue;
260 
261 		if (dvp == *pdvp)
262 			relock_pdvp = 1;
263 		vput(dvp);
264 		dvp = NULL;
265 		error = VFS_ROOT(mp, &dvp);
266 
267 		vfs_unbusy(mp, p);
268 
269 		if (relock_pdvp)
270 			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, p);
271 
272 		if (error) {
273 			*vpp = NULL;
274 			return (error);
275 		}
276 	}
277 	*vpp = dvp;
278 	return (0);
279 }
280 
281 static int
282 union_lookup(ap)
283 	struct vop_lookup_args /* {
284 		struct vnodeop_desc *a_desc;
285 		struct vnode *a_dvp;
286 		struct vnode **a_vpp;
287 		struct componentname *a_cnp;
288 	} */ *ap;
289 {
290 	int error;
291 	int uerror, lerror;
292 	struct vnode *uppervp, *lowervp;
293 	struct vnode *upperdvp, *lowerdvp;
294 	struct vnode *dvp = ap->a_dvp;		/* starting dir */
295 	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
296 	struct componentname *cnp = ap->a_cnp;
297 	struct proc *p = cnp->cn_proc;
298 	int lockparent = cnp->cn_flags & LOCKPARENT;
299 	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
300 	struct ucred *saved_cred = NULL;
301 	int iswhiteout;
302 	struct vattr va;
303 
304 	*ap->a_vpp = NULLVP;
305 
306 	/*
307 	 * Disallow write attempts to the filesystem mounted read-only.
308 	 */
309 	if ((cnp->cn_flags & ISLASTCN) &&
310 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
311 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
312 		return (EROFS);
313 	}
314 
315 	/*
316 	 * For any lookups we do, always return with the parent locked.
317 	 */
318 	cnp->cn_flags |= LOCKPARENT;
319 
320 	lowerdvp = dun->un_lowervp;
321 	uppervp = NULLVP;
322 	lowervp = NULLVP;
323 	iswhiteout = 0;
324 
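	/*
	 * Default both layers to "not found" so that a missing layer
	 * behaves the same as a failed lookup in that layer.
	 */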
325 	uerror = ENOENT;
326 	lerror = ENOENT;
327 
328 	/*
329 	 * Get a private lock on upperdvp and a reference, effectively
330 	 * taking it out of the union_node's control.
331 	 *
332 	 * We must lock upperdvp while holding our lock on dvp
333 	 * to avoid a deadlock.
334 	 */
335 	upperdvp = union_lock_upper(dun, p);
336 
337 	/*
338 	 * do the lookup in the upper level.
339 	 * if that level consumes additional pathnames,
340 	 * then assume that something special is going
341 	 * on and just return that vnode.
342 	 */
343 	if (upperdvp != NULLVP) {
344 		/*
345 		 * We do not have to worry about the DOTDOT case, we've
346 		 * already unlocked dvp.
347 		 */
348 		UDEBUG(("A %p\n", upperdvp));
349 
350 		/*
351 		 * Do the lookup.   We must supply a locked and referenced
352 		 * upperdvp to the function and will get a new locked and
353 		 * referenced upperdvp back with the old having been
354 		 * dereferenced.
355 		 *
356 		 * If an error is returned, uppervp will be NULLVP.  If no
357 		 * error occurs, uppervp will be the locked and referenced
358 		 * return vnode or possibly NULL, depending on what is being
359 		 * requested.  It is possible that the returned uppervp
360 		 * will be the same as upperdvp.
361 		 */
362 		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
363 		UDEBUG((
364 		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
365 		    uerror,
366 		    upperdvp,
367 		    upperdvp->v_usecount,
368 		    VOP_ISLOCKED(upperdvp),
369 		    uppervp,
370 		    (uppervp ? uppervp->v_usecount : -99),
371 		    (uppervp ? VOP_ISLOCKED(uppervp) : -99)
372 		));
373 
374 		/*
375 		 * Disallow write attempts to the filesystem mounted read-only.
376 		 */
377 		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
378 		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
379 		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
380 			error = EROFS;
381 			goto out;
382 		}
383 
384 		/*
385 		 * Special case.  If cn_consume != 0 skip out.  The result
386 		 * of the lookup is transferred to our return variable.  If
387 		 * an error occurred we have to throw away the results.
388 		 */
389 
390 		if (cnp->cn_consume != 0) {
391 			if ((error = uerror) == 0) {
392 				*ap->a_vpp = uppervp;
393 				uppervp = NULL;
394 			}
395 			goto out;
396 		}
397 
398 		/*
399 		 * Calculate whiteout, fall through
400 		 */
401 
402 		if (uerror == ENOENT || uerror == EJUSTRETURN) {
403 			if (cnp->cn_flags & ISWHITEOUT) {
404 				iswhiteout = 1;
405 			} else if (lowerdvp != NULLVP) {
406 				int terror;
407 
408 				terror = VOP_GETATTR(upperdvp, &va,
409 					cnp->cn_cred, cnp->cn_proc);
410 				if (terror == 0 && (va.va_flags & OPAQUE))
411 					iswhiteout = 1;
412 			}
413 		}
414 	}
415 
416 	/*
417 	 * in a similar way to the upper layer, do the lookup
418 	 * in the lower layer.   this time, if there is some
419 	 * component magic going on, then vput whatever we got
420 	 * back from the upper layer and return the lower vnode
421 	 * instead.
422 	 */
423 
424 	if (lowerdvp != NULLVP && !iswhiteout) {
425 		int nameiop;
426 
427 		UDEBUG(("B %p\n", lowerdvp));
428 
429 		/*
430 		 * Force only LOOKUPs on the lower node, since
431 		 * we won't be making changes to it anyway.
432 		 */
433 		nameiop = cnp->cn_nameiop;
434 		cnp->cn_nameiop = LOOKUP;
435 		if (um->um_op == UNMNT_BELOW) {
436 			saved_cred = cnp->cn_cred;
437 			cnp->cn_cred = um->um_cred;
438 		}
439 
440 		/*
441 		 * We shouldn't have to worry about locking interactions
442 		 * between the lower layer and our union layer (w.r.t.
443 		 * `..' processing) because we don't futz with lowervp
444 		 * locks in the union-node instantiation code path.
445 		 *
446 		 * union_lookup1() requires lowerdvp to be locked on entry,
447 		 * and it will be unlocked on return.  The ref count will
448 		 * not change.  On return lowerdvp doesn't represent anything
449 		 * to us, so we NULL it out.
450 		 */
451 		VREF(lowerdvp);
452 		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p);
453 		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
454 		if (lowerdvp == lowervp)
455 			vrele(lowerdvp);
456 		else
457 			vput(lowerdvp);
458 		lowerdvp = NULL;	/* lowerdvp invalid after vput */
459 
460 		if (um->um_op == UNMNT_BELOW)
461 			cnp->cn_cred = saved_cred;
462 		cnp->cn_nameiop = nameiop;
463 
464 		if (cnp->cn_consume != 0 || lerror == EACCES) {
465 			if ((error = lerror) == 0) {
466 				*ap->a_vpp = lowervp;
467 				lowervp = NULL;
468 			}
469 			goto out;
470 		}
471 	} else {
472 		UDEBUG(("C %p\n", lowerdvp));
473 		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
474 			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
475 				VREF(lowervp);
476 				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p);
477 				lerror = 0;
478 			}
479 		}
480 	}
481 
482 	/*
483 	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
484 	 *
485 	 * 1. If both layers returned an error, select the upper layer.
486 	 *
487 	 * 2. If the upper layer failed and the bottom layer succeeded,
488 	 *    two subcases occur:
489 	 *
490 	 *	a.  The bottom vnode is not a directory, in which case
491 	 *	    just return a new union vnode referencing an
492 	 *	    empty top layer and the existing bottom layer.
493 	 *
494 	 *	b.  The bottom vnode is a directory, in which case
495 	 *	    create a new directory in the top layer and
496 	 *	    fall through to case 3.
497 	 *
498 	 * 3. If the top layer succeeded, return a new union
499 	 *    vnode referencing whatever the top layer and
500 	 *    whatever the bottom layer returned.
501 	 */
502 
503 	/* case 1. */
504 	if ((uerror != 0) && (lerror != 0)) {
505 		error = uerror;
506 		goto out;
507 	}
508 
509 	/* case 2. */
510 	if (uerror != 0 /* && (lerror == 0) */ ) {
511 		if (lowervp->v_type == VDIR) { /* case 2b. */
512 			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
513 			/*
514 			 * oops, uppervp has a problem, we may have to shadow.
515 			 */
516 			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
517 			if (uerror) {
518 				error = uerror;
519 				goto out;
520 			}
521 		}
522 	}
523 
524 	/*
525 	 * Must call union_allocvp with both the upper and lower vnodes
526 	 * referenced and the upper vnode locked.   ap->a_vpp is returned
527 	 * referenced and locked.  lowervp, uppervp, and upperdvp are
528 	 * absorbed by union_allocvp() whether it succeeds or fails.
529 	 *
530 	 * upperdvp is the parent directory of uppervp which may be
531 	 * different, depending on the path, from dun->un_uppervp.  That's
532 	 * why it is a separate argument.  Note that it must be unlocked.
533 	 *
534 	 * dvp must be locked on entry to the call and will be locked on
535 	 * return.
536 	 */
537 
538 	if (uppervp && uppervp != upperdvp)
539 		VOP_UNLOCK(uppervp, 0, p);
540 	if (lowervp)
541 		VOP_UNLOCK(lowervp, 0, p);
542 	if (upperdvp)
543 		VOP_UNLOCK(upperdvp, 0, p);
544 
545 	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
546 			      uppervp, lowervp, 1);
547 
548 	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? ((*ap->a_vpp)->v_usecount) : -99));
549 
550 	uppervp = NULL;
551 	upperdvp = NULL;
552 	lowervp = NULL;
553 
554 	/*
555 	 *	Termination Code
556 	 *
557 	 *	- put away any extra junk lying around.  Note that lowervp
558 	 *	  (if not NULL) will never be the same as *ap->a_vpp and
559 	 *	  neither will uppervp, because when we set that state we
560 	 *	  NULL-out lowervp or uppervp.  On the other hand, upperdvp
561 	 *	  may match uppervp or *ap->a_vpp.
562 	 *
563 	 *	- relock/unlock dvp if appropriate.
564 	 */
565 
566 out:
567 	if (upperdvp) {
568 		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
569 			vrele(upperdvp);
570 		else
571 			vput(upperdvp);
572 	}
573 
574 	if (uppervp)
575 		vput(uppervp);
576 
577 	if (lowervp)
578 		vput(lowervp);
579 
580 	/*
581 	 * Restore LOCKPARENT state
582 	 */
583 
584 	if (!lockparent)
585 		cnp->cn_flags &= ~LOCKPARENT;
586 
587 	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
588 		((*ap->a_vpp) ? (*ap->a_vpp)->v_usecount : -99),
589 		lowervp, uppervp));
590 
591 	/*
592 	 * dvp lock state, determine whether to relock dvp.  dvp is expected
593 	 * to be locked on return if:
594 	 *
595 	 *	- there was an error (other than EJUSTRETURN), or
596 	 *	- we hit the last component and lockparent is true
597 	 *
598 	 * dvp_is_locked is the current state of the dvp lock, not counting
599 	 * the possibility that *ap->a_vpp == dvp (in which case it is locked
600 	 * anyway).  Note that *ap->a_vpp == dvp only if no error occurred.
601 	 */
602 
603 	if (*ap->a_vpp != dvp) {
604 		if ((error == 0 || error == EJUSTRETURN) &&
605 		    (!lockparent || (cnp->cn_flags & ISLASTCN) == 0)) {
606 			VOP_UNLOCK(dvp, 0, p);
607 		}
608 	}
609 
610 	/*
611 	 * Diagnostics
612 	 */
613 
614 #ifdef DIAGNOSTIC
615 	if (cnp->cn_namelen == 1 &&
616 	    cnp->cn_nameptr[0] == '.' &&
617 	    *ap->a_vpp != dvp) {
618 		panic("union_lookup returning . (%p) not same as startdir (%p)", *ap->a_vpp, dvp);
619 	}
620 #endif
621 
622 	return (error);
623 }
624 
625 /*
626  * 	union_create:
627  *
628  * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
629  * locked if no error occurs, otherwise it is garbage.
630  */
631 
632 static int
633 union_create(ap)
634 	struct vop_create_args /* {
635 		struct vnode *a_dvp;
636 		struct vnode **a_vpp;
637 		struct componentname *a_cnp;
638 		struct vattr *a_vap;
639 	} */ *ap;
640 {
641 	struct union_node *dun = VTOUNION(ap->a_dvp);
642 	struct componentname *cnp = ap->a_cnp;
643 	struct proc *p = cnp->cn_proc;
644 	struct vnode *dvp;
645 	int error = EROFS;
646 
647 	if ((dvp = union_lock_upper(dun, p)) != NULL) {
648 		struct vnode *vp;
649 		struct mount *mp;
650 
651 		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
652 		if (error == 0) {
653 			mp = ap->a_dvp->v_mount;
654 			VOP_UNLOCK(vp, 0, p);
655 			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_usecount));
656 			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
657 				cnp, vp, NULLVP, 1);
658 			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
659 		}
660 		union_unlock_upper(dvp, p);
661 	}
662 	return (error);
663 }
664 
665 static int
666 union_whiteout(ap)
667 	struct vop_whiteout_args /* {
668 		struct vnode *a_dvp;
669 		struct componentname *a_cnp;
670 		int a_flags;
671 	} */ *ap;
672 {
673 	struct union_node *un = VTOUNION(ap->a_dvp);
674 	struct componentname *cnp = ap->a_cnp;
675 	struct vnode *uppervp;
676 	int error = EOPNOTSUPP;
677 
678 	if ((uppervp = union_lock_upper(un, cnp->cn_proc)) != NULLVP) {
679 		error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
680 		union_unlock_upper(uppervp, cnp->cn_proc);
681 	}
682 	return(error);
683 }
684 
685 /*
686  * 	union_mknod:
687  *
688  *	a_dvp is locked on entry and should remain locked on return.
689 	 *	a_vpp is garbage whether an error occurs or not.
690  */
691 
692 static int
693 union_mknod(ap)
694 	struct vop_mknod_args /* {
695 		struct vnode *a_dvp;
696 		struct vnode **a_vpp;
697 		struct componentname *a_cnp;
698 		struct vattr *a_vap;
699 	} */ *ap;
700 {
701 	struct union_node *dun = VTOUNION(ap->a_dvp);
702 	struct componentname *cnp = ap->a_cnp;
703 	struct vnode *dvp;
704 	int error = EROFS;
705 
706 	if ((dvp = union_lock_upper(dun, cnp->cn_proc)) != NULL) {
707 		struct vnode *vp;
708 		error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
709 		/* vp is garbage whether an error occurs or not */
710 		union_unlock_upper(dvp, cnp->cn_proc);
711 	}
712 	return (error);
713 }
714 
715 /*
716  *	union_open:
717  *
718  *	run open VOP.  When opening the underlying vnode we have to mimic
719  *	vn_open.  What we *really* need to do to avoid screwups if the
720  *	open semantics change is to call vn_open().  For example, ufs blows
721  *	up if you open a file but do not vmio it prior to writing.
722  */
723 
724 static int
725 union_open(ap)
726 	struct vop_open_args /* {
727 		struct vnodeop_desc *a_desc;
728 		struct vnode *a_vp;
729 		int a_mode;
730 		struct ucred *a_cred;
731 		struct proc *a_p;
732 	} */ *ap;
733 {
734 	struct union_node *un = VTOUNION(ap->a_vp);
735 	struct vnode *tvp;
736 	int mode = ap->a_mode;
737 	struct ucred *cred = ap->a_cred;
738 	struct proc *p = ap->a_p;
739 	int error = 0;
740 	int tvpisupper = 1;
741 
742 	/*
743 	 * If there is an existing upper vp then simply open that.
744 	 * The upper vp takes precedence over the lower vp.  When opening
745 	 * a lower vp for writing copy it to the uppervp and then open the
746 	 * uppervp.
747 	 *
748 	 * At the end of this section tvp will be left locked.
749 	 */
750 	if ((tvp = union_lock_upper(un, p)) == NULLVP) {
751 		/*
752 		 * If the lower vnode is being opened for writing, then
753 		 * copy the file contents to the upper vnode and open that,
754 		 * otherwise can simply open the lower vnode.
755 		 * otherwise we can simply open the lower vnode.
756 		tvp = un->un_lowervp;
757 		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
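			/*
			 * If the open truncates the file anyway, there is
			 * no point copying the existing contents up.
			 */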
758 			int docopy = !(mode & O_TRUNC);
759 			error = union_copyup(un, docopy, cred, p);
760 			tvp = union_lock_upper(un, p);
761 		} else {
762 			un->un_openl++;
763 			VREF(tvp);
764 			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
765 			tvpisupper = 0;
766 		}
767 	}
768 
769 	/*
770 	 * We are holding the correct vnode, open it
771 	 */
772 
773 	if (error == 0)
774 		error = VOP_OPEN(tvp, mode, cred, p);
775 
776 	/*
777 	 * Absolutely necessary or UFS will blow up.
778 	 */
779 	if (error == 0 && vn_canvmio(tvp) == TRUE) {
780 		error = vfs_object_create(tvp, p, cred);
781 	}
782 
783 	/*
784 	 * Release any locks held
785 	 */
786 	if (tvpisupper) {
787 		if (tvp)
788 			union_unlock_upper(tvp, p);
789 	} else {
790 		vput(tvp);
791 	}
792 	return (error);
793 }
794 
795 /*
796  *	union_close:
797  *
798  *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
799  *	the case, we do not change it.
800  */
801 
802 static int
803 union_close(ap)
804 	struct vop_close_args /* {
805 		struct vnode *a_vp;
806 		int  a_fflag;
807 		struct ucred *a_cred;
808 		struct proc *a_p;
809 	} */ *ap;
810 {
811 	struct union_node *un = VTOUNION(ap->a_vp);
812 	struct vnode *vp;
813 
814 	if ((vp = un->un_uppervp) == NULLVP) {
815 #ifdef UNION_DIAGNOSTIC
816 		if (un->un_openl <= 0)
817 			panic("union: un_openl cnt");
818 #endif
819 		--un->un_openl;
820 		vp = un->un_lowervp;
821 	}
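	/*
	 * Substitute the chosen layer's vnode into the argument block
	 * and re-dispatch the close directly to that layer.
	 */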
822 	ap->a_vp = vp;
823 	return (VCALL(vp, VOFFSET(vop_close), ap));
824 }
825 
826 /*
827  * Check access permission on the union vnode.
828  * The access check being enforced is to check
829  * against both the underlying vnode, and any
830  * copied vnode.  This ensures that no additional
831  * file permissions are given away simply because
832  * the user caused an implicit file copy.
833  */
834 static int
835 union_access(ap)
836 	struct vop_access_args /* {
837 		struct vnodeop_desc *a_desc;
838 		struct vnode *a_vp;
839 		int a_mode;
840 		struct ucred *a_cred;
841 		struct proc *a_p;
842 	} */ *ap;
843 {
844 	struct union_node *un = VTOUNION(ap->a_vp);
845 	struct proc *p = ap->a_p;
846 	int error = EACCES;
847 	struct vnode *vp;
848 
849 	/*
850 	 * Disallow write attempts on filesystems mounted read-only.
851 	 */
852 	if ((ap->a_mode & VWRITE) &&
853 	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
854 		switch (ap->a_vp->v_type) {
855 		case VREG:
856 		case VDIR:
857 		case VLNK:
858 			return (EROFS);
859 		default:
860 			break;
861 		}
862 	}
863 
864 	if ((vp = union_lock_upper(un, p)) != NULLVP) {
865 		ap->a_vp = vp;
866 		error = VCALL(vp, VOFFSET(vop_access), ap);
867 		union_unlock_upper(vp, p);
868 		return(error);
869 	}
870 
871 	if ((vp = un->un_lowervp) != NULLVP) {
872 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
873 		ap->a_vp = vp;
874 
875 		/*
876 		 * Remove VWRITE from a_mode if our mount point is RW, because
877 		 * we want to allow writes and lowervp may be read-only.
878 		 */
879 		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
880 			ap->a_mode &= ~VWRITE;
881 
882 		error = VCALL(vp, VOFFSET(vop_access), ap);
883 		if (error == 0) {
884 			struct union_mount *um;
885 
886 			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);
887 
888 			if (um->um_op == UNMNT_BELOW) {
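			/*
			 * For a "below" mount, re-check access using the
			 * credentials saved at mount time (um_cred).
			 */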
889 				ap->a_cred = um->um_cred;
890 				error = VCALL(vp, VOFFSET(vop_access), ap);
891 			}
892 		}
893 		VOP_UNLOCK(vp, 0, p);
894 	}
895 	return(error);
896 }
897 
898 /*
899  * We handle getattr only to change the fsid and
900  * track object sizes.
901  *
902  * It's not clear whether VOP_GETATTR is to be
903  * called with the vnode locked or not.  stat() calls
904  * it with (vp) locked, and fstat() calls it with
905  * (vp) unlocked.
906  *
907  * Because of this we cannot use our normal locking functions
908  * if we do not intend to lock the main a_vp node.  At the moment
909  * we are running without any specific locking at all; any
910  * programmer should beware that care must be taken if
911  * locking is added to this function.
912  */
913 
914 static int
915 union_getattr(ap)
916 	struct vop_getattr_args /* {
917 		struct vnode *a_vp;
918 		struct vattr *a_vap;
919 		struct ucred *a_cred;
920 		struct proc *a_p;
921 	} */ *ap;
922 {
923 	int error;
924 	struct union_node *un = VTOUNION(ap->a_vp);
925 	struct vnode *vp;
926 	struct vattr *vap;
927 	struct vattr va;
928 
929 	/*
930 	 * Some programs walk the filesystem hierarchy by counting
931 	 * links to directories to avoid stat'ing all the time.
932 	 * This means the link count on directories needs to be "correct".
933 	 * The only way to do that is to call getattr on both layers
934 	 * and fix up the link count.  The link count will not necessarily
935 	 * be accurate but will be large enough to defeat the tree walkers.
936 	 */
937 
938 	vap = ap->a_vap;
939 
940 	if ((vp = un->un_uppervp) != NULLVP) {
941 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
942 		if (error)
943 			return (error);
944 		/* XXX isn't this dangerous without a lock? */
945 		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
946 	}
947 
948 	if (vp == NULLVP) {
949 		vp = un->un_lowervp;
950 	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
951 		vp = un->un_lowervp;
952 		vap = &va;
953 	} else {
954 		vp = NULLVP;
955 	}
956 
957 	if (vp != NULLVP) {
958 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
959 		if (error)
960 			return (error);
961 		/* XXX isn't this dangerous without a lock? */
962 		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
963 	}
964 
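	/*
	 * If we fetched attributes from both layers and the lower
	 * object is a directory, fold its link count into the result
	 * (see the comment above regarding tree walkers).
	 */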
965 	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
966 		ap->a_vap->va_nlink += vap->va_nlink;
967 	return (0);
968 }
969 
970 static int
971 union_setattr(ap)
972 	struct vop_setattr_args /* {
973 		struct vnode *a_vp;
974 		struct vattr *a_vap;
975 		struct ucred *a_cred;
976 		struct proc *a_p;
977 	} */ *ap;
978 {
979 	struct union_node *un = VTOUNION(ap->a_vp);
980 	struct proc *p = ap->a_p;
981 	struct vattr *vap = ap->a_vap;
982 	struct vnode *uppervp;
983 	int error;
984 
985 	/*
986 	 * Disallow write attempts on filesystems mounted read-only.
987 	 */
988 	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
989 	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
990 	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
991 	     vap->va_mtime.tv_sec != VNOVAL ||
992 	     vap->va_mode != (mode_t)VNOVAL)) {
993 		return (EROFS);
994 	}
995 
996 	/*
997 	 * Handle case of truncating lower object to zero size,
998 	 * by creating a zero length upper object.  This is to
999 	 * handle the case of open with O_TRUNC and O_CREAT.
1000 	 */
1001 	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
1002 		error = union_copyup(un, (ap->a_vap->va_size != 0),
1003 			    ap->a_cred, ap->a_p);
1004 		if (error)
1005 			return (error);
1006 	}
1007 
1008 	/*
1009 	 * Try to set attributes in upper layer,
1010 	 * otherwise return read-only filesystem error.
1011 	 */
1012 	error = EROFS;
1013 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1014 		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
1015 					ap->a_cred, ap->a_p);
1016 		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
1017 			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
1018 		union_unlock_upper(uppervp, p);
1019 	}
1020 	return (error);
1021 }
1022 
1023 /*
1024  *	union_getpages:
1025  */
1026 
1027 static int
1028 union_getpages(struct vop_getpages_args *ap)
1029 {
1030 	int r;
1031 
1032 	r = vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
1033 		ap->a_count, ap->a_reqpage);
1034 	return(r);
1035 }
1036 
1037 /*
1038  *	union_putpages:
1039  */
1040 
1041 static int
1042 union_putpages(struct vop_putpages_args *ap)
1043 {
1044 	int r;
1045 
1046 	r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
1047 		ap->a_sync, ap->a_rtvals);
1048 	return(r);
1049 }
1050 
1051 static int
1052 union_read(ap)
1053 	struct vop_read_args /* {
1054 		struct vnode *a_vp;
1055 		struct uio *a_uio;
1056 		int  a_ioflag;
1057 		struct ucred *a_cred;
1058 	} */ *ap;
1059 {
1060 	struct union_node *un = VTOUNION(ap->a_vp);
1061 	struct proc *p = ap->a_uio->uio_procp;
1062 	struct vnode *uvp;
1063 	int error;
1064 
1065 	uvp = union_lock_other(un, p);
1066 	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
1067 
1068 	if (ap->a_vp->v_flag & VOBJBUF)
1069 		union_vm_coherency(ap->a_vp, ap->a_uio, 0);
1070 
1071 	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1072 	union_unlock_other(uvp, p);
1073 
1074 	/*
1075 	 * XXX
1076 	 * perhaps the size of the underlying object has changed under
1077 	 * our feet.  take advantage of the offset information present
1078 	 * in the uio structure.
1079 	 */
1080 	if (error == 0) {
1081 		struct union_node *un = VTOUNION(ap->a_vp);
1082 		off_t cur = ap->a_uio->uio_offset;
1083 
1084 		if (uvp == un->un_uppervp) {
1085 			if (cur > un->un_uppersz)
1086 				union_newsize(ap->a_vp, cur, VNOVAL);
1087 		} else {
1088 			if (cur > un->un_lowersz)
1089 				union_newsize(ap->a_vp, VNOVAL, cur);
1090 		}
1091 	}
1092 	return (error);
1093 }
1094 
1095 static int
1096 union_write(ap)
1097 	struct vop_read_args /* {
1098 		struct vnode *a_vp;
1099 		struct uio *a_uio;
1100 		int  a_ioflag;
1101 		struct ucred *a_cred;
1102 	} */ *ap;
1103 {
1104 	struct union_node *un = VTOUNION(ap->a_vp);
1105 	struct proc *p = ap->a_uio->uio_procp;
1106 	struct vnode *uppervp;
1107 	int error;
1108 
1109 	if ((uppervp = union_lock_upper(un, p)) == NULLVP)
1110 		panic("union: missing upper layer in write");
1111 
1112 	/*
1113 	 * Since our VM pages are associated with our vnode rather than
1114 	 * the real vnode, and since we do not run our reads and writes
1115 	 * through our own VM cache, we have a VM/VFS coherency problem.
1116 	 * We solve it by invalidating or flushing the associated VM
1117 	 * pages prior to allowing a normal read or write to occur.
1118 	 *
1119 	 * VM-backed writes (UIO_NOCOPY) have to be converted to normal
1120 	 * writes because we are not cache-coherent.  Normal writes need
1121 	 * to be made coherent with our VM-backing store, which we do by
1122 	 * first flushing any dirty VM pages associated with the write
1123 	 * range, and then destroying any clean VM pages associated with
1124 	 * the write range.
1125 	 */
1126 
1127 	if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
1128 		ap->a_uio->uio_segflg = UIO_SYSSPACE;
1129 	} else if (ap->a_vp->v_flag & VOBJBUF) {
1130 		union_vm_coherency(ap->a_vp, ap->a_uio, 1);
1131 	}
1132 
1133 	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1134 
1135 	/*
1136 	 * the size of the underlying object may be changed by the
1137 	 * write.
1138 	 */
1139 	if (error == 0) {
1140 		off_t cur = ap->a_uio->uio_offset;
1141 
1142 		if (cur > un->un_uppersz)
1143 			union_newsize(ap->a_vp, cur, VNOVAL);
1144 	}
1145 	union_unlock_upper(uppervp, p);
1146 	return (error);
1147 }
1148 
1149 static int
1150 union_lease(ap)
1151 	struct vop_lease_args /* {
1152 		struct vnode *a_vp;
1153 		struct proc *a_p;
1154 		struct ucred *a_cred;
1155 		int a_flag;
1156 	} */ *ap;
1157 {
1158 	struct vnode *ovp = OTHERVP(ap->a_vp);
1159 
1160 	ap->a_vp = ovp;
1161 	return (VCALL(ovp, VOFFSET(vop_lease), ap));
1162 }
1163 
1164 static int
1165 union_ioctl(ap)
1166 	struct vop_ioctl_args /* {
1167 		struct vnode *a_vp;
1168 		int  a_command;
1169 		caddr_t  a_data;
1170 		int  a_fflag;
1171 		struct ucred *a_cred;
1172 		struct proc *a_p;
1173 	} */ *ap;
1174 {
1175 	struct vnode *ovp = OTHERVP(ap->a_vp);
1176 
1177 	ap->a_vp = ovp;
1178 	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1179 }
1180 
1181 static int
1182 union_poll(ap)
1183 	struct vop_poll_args /* {
1184 		struct vnode *a_vp;
1185 		int  a_events;
1186 		struct ucred *a_cred;
1187 		struct proc *a_p;
1188 	} */ *ap;
1189 {
1190 	struct vnode *ovp = OTHERVP(ap->a_vp);
1191 
1192 	ap->a_vp = ovp;
1193 	return (VCALL(ovp, VOFFSET(vop_poll), ap));
1194 }
1195 
1196 static int
1197 union_revoke(ap)
1198 	struct vop_revoke_args /* {
1199 		struct vnode *a_vp;
1200 		int a_flags;
1201 		struct proc *a_p;
1202 	} */ *ap;
1203 {
1204 	struct vnode *vp = ap->a_vp;
1205 
1206 	if (UPPERVP(vp))
1207 		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1208 	if (LOWERVP(vp))
1209 		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1210 	vgone(vp);
1211 	return (0);
1212 }
1213 
1214 static int
1215 union_mmap(ap)
1216 	struct vop_mmap_args /* {
1217 		struct vnode *a_vp;
1218 		int  a_fflags;
1219 		struct ucred *a_cred;
1220 		struct proc *a_p;
1221 	} */ *ap;
1222 {
1223 	struct vnode *ovp = OTHERVP(ap->a_vp);
1224 
1225 	ap->a_vp = ovp;
1226 	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
1227 }
1228 
1229 static int
1230 union_fsync(ap)
1231 	struct vop_fsync_args /* {
1232 		struct vnode *a_vp;
1233 		struct ucred *a_cred;
1234 		int  a_waitfor;
1235 		struct proc *a_p;
1236 	} */ *ap;
1237 {
1238 	int error = 0;
1239 	struct proc *p = ap->a_p;
1240 	struct vnode *targetvp;
1241 	struct union_node *un = VTOUNION(ap->a_vp);
1242 
1243 	if ((targetvp = union_lock_other(un, p)) != NULLVP) {
1244 		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p);
1245 		union_unlock_other(targetvp, p);
1246 	}
1247 
1248 	return (error);
1249 }
1250 
1251 /*
1252  *	union_remove:
1253  *
1254  *	Remove the specified cnp.  The dvp and vp are passed to us locked
1255  *	and must remain locked on return.
1256  */
1257 
1258 static int
1259 union_remove(ap)
1260 	struct vop_remove_args /* {
1261 		struct vnode *a_dvp;
1262 		struct vnode *a_vp;
1263 		struct componentname *a_cnp;
1264 	} */ *ap;
1265 {
1266 	struct union_node *dun = VTOUNION(ap->a_dvp);
1267 	struct union_node *un = VTOUNION(ap->a_vp);
1268 	struct componentname *cnp = ap->a_cnp;
1269 	struct proc *p = cnp->cn_proc;
1270 	struct vnode *uppervp;
1271 	struct vnode *upperdvp;
1272 	int error;
1273 
1274 	if ((upperdvp = union_lock_upper(dun, p)) == NULLVP)
1275 		panic("union remove: null upper vnode");
1276 
1277 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1278 		if (union_dowhiteout(un, cnp->cn_cred, p))
1279 			cnp->cn_flags |= DOWHITEOUT;
1280 		error = VOP_REMOVE(upperdvp, uppervp, cnp);
1281 #if 0
1282 		/* XXX */
1283 		if (!error)
1284 			union_removed_upper(un);
1285 #endif
1286 		union_unlock_upper(uppervp, p);
1287 	} else {
1288 		error = union_mkwhiteout(
1289 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1290 			    upperdvp, ap->a_cnp, un->un_path);
1291 	}
1292 	union_unlock_upper(upperdvp, p);
1293 	return (error);
1294 }
1295 
1296 /*
1297  *	union_link:
1298  *
1299  *	tdvp will be locked on entry, vp will not be locked on entry.
1300  *	tdvp should remain locked on return and vp should remain unlocked
1301  *	on return.
1302  */
1303 
1304 static int
1305 union_link(ap)
1306 	struct vop_link_args /* {
1307 		struct vnode *a_tdvp;
1308 		struct vnode *a_vp;
1309 		struct componentname *a_cnp;
1310 	} */ *ap;
1311 {
1312 	struct componentname *cnp = ap->a_cnp;
1313 	struct proc *p = cnp->cn_proc;
1314 	struct union_node *dun = VTOUNION(ap->a_tdvp);
1315 	struct vnode *vp;
1316 	struct vnode *tdvp;
1317 	int error = 0;
1318 
1319 	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
1320 		vp = ap->a_vp;
1321 	} else {
1322 		struct union_node *tun = VTOUNION(ap->a_vp);
1323 
1324 		if (tun->un_uppervp == NULLVP) {
1325 			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
1326 #if 0
1327 			if (dun->un_uppervp == tun->un_dirvp) {
1328 				if (dun->un_flags & UN_ULOCK) {
1329 					dun->un_flags &= ~UN_ULOCK;
1330 					VOP_UNLOCK(dun->un_uppervp, 0, p);
1331 				}
1332 			}
1333 #endif
1334 			error = union_copyup(tun, 1, cnp->cn_cred, p);
1335 #if 0
1336 			if (dun->un_uppervp == tun->un_dirvp) {
1337 				vn_lock(dun->un_uppervp,
1338 					    LK_EXCLUSIVE | LK_RETRY, p);
1339 				dun->un_flags |= UN_ULOCK;
1340 			}
1341 #endif
1342 			VOP_UNLOCK(ap->a_vp, 0, p);
1343 		}
1344 		vp = tun->un_uppervp;
1345 	}
1346 
1347 	if (error)
1348 		return (error);
1349 
1350 	/*
1351 	 * Make sure upper is locked, then unlock the union directory we were
1352 	 * called with to avoid a deadlock while we are calling VOP_LINK on
1353 	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
1354 	 * is expected to be locked on return.
1355 	 */
1356 
1357 	if ((tdvp = union_lock_upper(dun, p)) == NULLVP)
1358 		return (EROFS);
1359 
1360 	VOP_UNLOCK(ap->a_tdvp, 0, p);		/* unlock calling node */
1361 	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */
1362 
1363 	/*
1364 	 * We have to unlock tdvp prior to relocking our calling node in
1365 	 * order to avoid a deadlock.
1366 	 */
1367 	union_unlock_upper(tdvp, p);
1368 	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, p);
1369 	return (error);
1370 }
1371 
1372 static int
1373 union_rename(ap)
1374 	struct vop_rename_args  /* {
1375 		struct vnode *a_fdvp;
1376 		struct vnode *a_fvp;
1377 		struct componentname *a_fcnp;
1378 		struct vnode *a_tdvp;
1379 		struct vnode *a_tvp;
1380 		struct componentname *a_tcnp;
1381 	} */ *ap;
1382 {
1383 	int error;
1384 	struct vnode *fdvp = ap->a_fdvp;
1385 	struct vnode *fvp = ap->a_fvp;
1386 	struct vnode *tdvp = ap->a_tdvp;
1387 	struct vnode *tvp = ap->a_tvp;
1388 
1389 	/*
1390 	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
1391 	 * replace the fdvp, release the original one and ref the new one.
1392 	 */
1393 
1394 	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
1395 		struct union_node *un = VTOUNION(fdvp);
1396 		if (un->un_uppervp == NULLVP) {
1397 			/*
1398 			 * this should never happen in normal
1399 			 * operation but might if there was
1400 			 * a problem creating the top-level shadow
1401 			 * directory.
1402 			 */
1403 			error = EXDEV;
1404 			goto bad;
1405 		}
1406 		fdvp = un->un_uppervp;
1407 		VREF(fdvp);
1408 		vrele(ap->a_fdvp);
1409 	}
1410 
1411 	/*
1412 	 * Figure out what fvp to pass to our upper or lower vnode.  If we
1413 	 * replace the fvp, release the original one and ref the new one.
1414 	 */
1415 
1416 	if (fvp->v_op == union_vnodeop_p) {	/* always true */
1417 		struct union_node *un = VTOUNION(fvp);
1418 #if 0
1419 		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
1420 #endif
1421 
1422 		if (un->un_uppervp == NULLVP) {
1423 			switch(fvp->v_type) {
1424 			case VREG:
1425 				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_proc);
1426 				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_proc);
1427 				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_proc);
1428 				if (error)
1429 					goto bad;
1430 				break;
1431 			case VDIR:
1432 				/*
1433 				 * XXX not yet.
1434 				 *
1435 				 * There is only one way to rename a directory
1436 				 * based in the lowervp, and that is to copy
1437 				 * the entire directory hierarchy.  Otherwise
1438 				 * it would not last across a reboot.
1439 				 */
1440 #if 0
1441 				vrele(fvp);
1442 				fvp = NULL;
1443 				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_proc);
1444 				error = union_mkshadow(um, fdvp,
1445 					    ap->a_fcnp, &un->un_uppervp);
1446 				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_proc);
1447 				if (un->un_uppervp)
1448 					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_proc);
1449 				if (error)
1450 					goto bad;
1451 				break;
1452 #endif
1453 			default:
1454 				error = EXDEV;
1455 				goto bad;
1456 			}
1457 		}
1458 
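		/*
		 * If the file also exists in the lower layer, the rename
		 * of the upper object must leave a whiteout behind so the
		 * lower version does not reappear.
		 */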
1459 		if (un->un_lowervp != NULLVP)
1460 			ap->a_fcnp->cn_flags |= DOWHITEOUT;
1461 		fvp = un->un_uppervp;
1462 		VREF(fvp);
1463 		vrele(ap->a_fvp);
1464 	}
1465 
1466 	/*
1467 	 * Figure out what tdvp (destination directory) to pass to the
1468 	 * lower level.  If we replace it with uppervp, we need to vput the
1469 	 * old one.  The exclusive lock is transferred to what we will pass
1470 	 * down in the VOP_RENAME and we replace uppervp with a simple
1471 	 * reference.
1472 	 */
1473 
1474 	if (tdvp->v_op == union_vnodeop_p) {
1475 		struct union_node *un = VTOUNION(tdvp);
1476 
1477 		if (un->un_uppervp == NULLVP) {
1478 			/*
1479 			 * this should never happen in normal
1480 			 * operation but might if there was
1481 			 * a problem creating the top-level shadow
1482 			 * directory.
1483 			 */
1484 			error = EXDEV;
1485 			goto bad;
1486 		}
1487 
1488 		/*
1489 		 * The new tdvp is a lock and reference on uppervp; put away
1490 		 * the old tdvp.
1491 		 */
1492 		tdvp = union_lock_upper(un, ap->a_tcnp->cn_proc);
1493 		vput(ap->a_tdvp);
1494 	}
1495 
1496 	/*
1497 	 * Figure out what tvp (destination file) to pass to the
1498 	 * lower level.
1499 	 *
1500 	 * If the uppervp file does not exist, put away the (wrong)
1501 	 * file and change tvp to NULL.
1502 	 */
1503 
1504 	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
1505 		struct union_node *un = VTOUNION(tvp);
1506 
1507 		tvp = union_lock_upper(un, ap->a_tcnp->cn_proc);
1508 		vput(ap->a_tvp);
1509 		/* note: tvp may be NULL */
1510 	}
1511 
1512 	/*
1513 	 * VOP_RENAME releases/vputs prior to returning, so we have no
1514 	 * cleanup to do.
1515 	 */
1516 
1517 	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));
1518 
1519 	/*
1520 	 * Error.  We still have to release / vput the various elements.
1521 	 */
1522 
1523 bad:
1524 	vrele(fdvp);
1525 	if (fvp)
1526 		vrele(fvp);
1527 	vput(tdvp);
1528 	if (tvp != NULLVP) {
1529 		if (tvp != tdvp)
1530 			vput(tvp);
1531 		else
1532 			vrele(tvp);
1533 	}
1534 	return (error);
1535 }
1536 
1537 static int
1538 union_mkdir(ap)
1539 	struct vop_mkdir_args /* {
1540 		struct vnode *a_dvp;
1541 		struct vnode **a_vpp;
1542 		struct componentname *a_cnp;
1543 		struct vattr *a_vap;
1544 	} */ *ap;
1545 {
1546 	struct union_node *dun = VTOUNION(ap->a_dvp);
1547 	struct componentname *cnp = ap->a_cnp;
1548 	struct proc *p = cnp->cn_proc;
1549 	struct vnode *upperdvp;
1550 	int error = EROFS;
1551 
1552 	if ((upperdvp = union_lock_upper(dun, p)) != NULLVP) {
1553 		struct vnode *vp;
1554 
1555 		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
1556 		union_unlock_upper(upperdvp, p);
1557 
1558 		if (error == 0) {
1559 			VOP_UNLOCK(vp, 0, p);
1560 			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_usecount));
1561 			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
1562 				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
1563 			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
1564 		}
1565 	}
1566 	return (error);
1567 }
1568 
1569 static int
1570 union_rmdir(ap)
1571 	struct vop_rmdir_args /* {
1572 		struct vnode *a_dvp;
1573 		struct vnode *a_vp;
1574 		struct componentname *a_cnp;
1575 	} */ *ap;
1576 {
1577 	struct union_node *dun = VTOUNION(ap->a_dvp);
1578 	struct union_node *un = VTOUNION(ap->a_vp);
1579 	struct componentname *cnp = ap->a_cnp;
1580 	struct proc *p = cnp->cn_proc;
1581 	struct vnode *upperdvp;
1582 	struct vnode *uppervp;
1583 	int error;
1584 
1585 	if ((upperdvp = union_lock_upper(dun, p)) == NULLVP)
1586 		panic("union rmdir: null upper vnode");
1587 
1588 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1589 		if (union_dowhiteout(un, cnp->cn_cred, p))
1590 			cnp->cn_flags |= DOWHITEOUT;
1591 		error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
1592 		union_unlock_upper(uppervp, p);
1593 	} else {
1594 		error = union_mkwhiteout(
1595 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1596 			    dun->un_uppervp, ap->a_cnp, un->un_path);
1597 	}
1598 	union_unlock_upper(upperdvp, p);
1599 	return (error);
1600 }
1601 
1602 /*
1603  *	union_symlink:
1604  *
1605  *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
1606  *	(unused).
1607  */
1608 
1609 static int
1610 union_symlink(ap)
1611 	struct vop_symlink_args /* {
1612 		struct vnode *a_dvp;
1613 		struct vnode **a_vpp;
1614 		struct componentname *a_cnp;
1615 		struct vattr *a_vap;
1616 		char *a_target;
1617 	} */ *ap;
1618 {
1619 	struct union_node *dun = VTOUNION(ap->a_dvp);
1620 	struct componentname *cnp = ap->a_cnp;
1621 	struct proc *p = cnp->cn_proc;
1622 	struct vnode *dvp;
1623 	int error = EROFS;
1624 
1625 	if ((dvp = union_lock_upper(dun, p)) != NULLVP) {
1626 		struct vnode *vp;
1627 
1628 		error = VOP_SYMLINK(dvp, &vp, cnp, ap->a_vap, ap->a_target);
1629 		/* vp is garbage whether an error occurs or not */
1630 		*ap->a_vpp = NULLVP;
1631 		union_unlock_upper(dvp, p);
1632 	}
1633 	return (error);
1634 }
1635 
1636 /*
1637  * union_readdir works in concert with getdirentries and
1638  * readdir(3) to provide a list of entries in the unioned
1639  * directories.  getdirentries is responsible for walking
1640  * down the union stack.  readdir(3) is responsible for
1641  * eliminating duplicate names from the returned data stream.
1642  */
1643 static int
1644 union_readdir(ap)
1645 	struct vop_readdir_args /* {
1646 		struct vnode *a_vp;
1647 		struct uio *a_uio;
1648 		struct ucred *a_cred;
1649 		int *a_eofflag;
1650 		u_long *a_cookies;
1651 		int a_ncookies;
1652 	} */ *ap;
1653 {
1654 	struct union_node *un = VTOUNION(ap->a_vp);
1655 	struct proc *p = ap->a_uio->uio_procp;
1656 	struct vnode *uvp;
1657 	int error = 0;
1658 
1659 	if ((uvp = union_lock_upper(un, p)) != NULLVP) {
1660 		ap->a_vp = uvp;
1661 		error = VCALL(uvp, VOFFSET(vop_readdir), ap);
1662 		union_unlock_upper(uvp, p);
1663 	}
1664 	return(error);
1665 }
1666 
1667 static int
1668 union_readlink(ap)
1669 	struct vop_readlink_args /* {
1670 		struct vnode *a_vp;
1671 		struct uio *a_uio;
1672 		struct ucred *a_cred;
1673 	} */ *ap;
1674 {
1675 	int error;
1676 	struct union_node *un = VTOUNION(ap->a_vp);
1677 	struct uio *uio = ap->a_uio;
1678 	struct proc *p = uio->uio_procp;
1679 	struct vnode *vp;
1680 
1681 	vp = union_lock_other(un, p);
1682 	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1683 
1684 	ap->a_vp = vp;
1685 	error = VCALL(vp, VOFFSET(vop_readlink), ap);
1686 	union_unlock_other(vp, p);
1687 
1688 	return (error);
1689 }
1690 
1691 /*
1692  *	union_abortop:
1693  *
1694  *	dvp is locked on entry and left locked on return
1695  *
1696  */
1697 
1698 static int
1699 union_abortop(ap)
1700 	struct vop_abortop_args /* {
1701 		struct vnode *a_dvp;
1702 		struct componentname *a_cnp;
1703 	} */ *ap;
1704 {
1705 	struct componentname *cnp = ap->a_cnp;
1706 	struct proc *p = cnp->cn_proc;
1707 	struct union_node *un = VTOUNION(ap->a_dvp);
1708 	int islocked = VOP_ISLOCKED(ap->a_dvp);
1709 	struct vnode *vp;
1710 	int error;
1711 
1712 	if (islocked) {
1713 		vp = union_lock_other(un, p);
1714 	} else {
1715 		vp = OTHERVP(ap->a_dvp);
1716 	}
1717 	KASSERT(vp != NULL, ("union_abortop: backing vnode missing!"));
1718 
1719 	ap->a_dvp = vp;
1720 	error = VCALL(vp, VOFFSET(vop_abortop), ap);
1721 
1722 	if (islocked)
1723 		union_unlock_other(vp, p);
1724 
1725 	return (error);
1726 }
1727 
1728 /*
1729  *	union_inactive:
1730  *
1731  *	Called with the vnode locked.  We are expected to unlock the vnode.
1732  */
1733 
1734 static int
1735 union_inactive(ap)
1736 	struct vop_inactive_args /* {
1737 		struct vnode *a_vp;
1738 		struct proc *a_p;
1739 	} */ *ap;
1740 {
1741 	struct vnode *vp = ap->a_vp;
1742 	struct proc *p = ap->a_p;
1743 	struct union_node *un = VTOUNION(vp);
1744 	struct vnode **vpp;
1745 
1746 	/*
1747 	 * Do nothing (and _don't_ bypass).
1748 	 * Wait to vrele lowervp until reclaim,
1749 	 * so that until then our union_node is in the
1750 	 * cache and reusable.
1751 	 *
1752 	 * NEEDSWORK: Someday, consider inactive'ing
1753 	 * the lowervp and then trying to reactivate it
1754 	 * with capabilities (v_id)
1755 	 * like they do in the name lookup cache code.
1756 	 * That's too much work for now.
1757 	 */
1758 
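	/*
	 * Release the cached directory vnode stack built up for
	 * readdir traversal, if any.
	 */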
1759 	if (un->un_dircache != 0) {
1760 		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1761 			vrele(*vpp);
1762 		free(un->un_dircache, M_TEMP);
1763 		un->un_dircache = 0;
1764 	}
1765 
1766 #if 0
1767 	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
1768 		un->un_flags &= ~UN_ULOCK;
1769 		VOP_UNLOCK(un->un_uppervp, 0, p);
1770 	}
1771 #endif
1772 
1773 	VOP_UNLOCK(vp, 0, p);
1774 
1775 	if ((un->un_flags & UN_CACHED) == 0)
1776 		vgone(vp);
1777 
1778 	return (0);
1779 }
1780 
1781 static int
1782 union_reclaim(ap)
1783 	struct vop_reclaim_args /* {
1784 		struct vnode *a_vp;
1785 	} */ *ap;
1786 {
1787 	union_freevp(ap->a_vp);
1788 
1789 	return (0);
1790 }
1791 
1792 static int
1793 union_lock(ap)
1794 	struct vop_lock_args *ap;
1795 {
1796 #if 0
1797 	struct vnode *vp = ap->a_vp;
1798 	struct proc *p = ap->a_p;
1799 	int flags = ap->a_flags;
1800 	struct union_node *un;
1801 #endif
1802 	int error;
1803 
1804 	error = vop_stdlock(ap);
1805 #if 0
1806 	un = VTOUNION(vp);
1807 
1808 	if (error == 0) {
1809 		/*
1810 		 * Lock the upper if it exists and this is an exclusive lock
1811 		 * request.
1812 		 */
1813 		if (un->un_uppervp != NULLVP &&
1814 		    (flags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
1815 			if ((un->un_flags & UN_ULOCK) == 0 && vp->v_usecount) {
1816 				error = vn_lock(un->un_uppervp, flags, p);
1817 				if (error) {
1818 					struct vop_unlock_args uap = { 0 };
1819 					uap.a_vp = ap->a_vp;
1820 					uap.a_flags = ap->a_flags;
1821 					uap.a_p = ap->a_p;
1822 					vop_stdunlock(&uap);
1823 					return (error);
1824 				}
1825 				un->un_flags |= UN_ULOCK;
1826 			}
1827 		}
1828 	}
1829 #endif
1830 	return (error);
1831 }
1832 
1833 /*
1834  *	union_unlock:
1835  *
1836  *	Unlock our union node.  This also unlocks uppervp.
1837  */
1838 static int
1839 union_unlock(ap)
1840 	struct vop_unlock_args /* {
1841 		struct vnode *a_vp;
1842 		int a_flags;
1843 		struct proc *a_p;
1844 	} */ *ap;
1845 {
1846 	struct union_node *un = VTOUNION(ap->a_vp);
1847 	int error;
1848 
1849 	KASSERT((un->un_uppervp == NULL || un->un_uppervp->v_usecount > 0), ("uppervp usecount is 0"));
1850 
1851 	error = vop_stdunlock(ap);
1852 #if 0
1853 
1854 	/*
1855 	 * If no exclusive locks remain and we are holding an uppervp lock,
1856 	 * remove the uppervp lock.
1857 	 */
1858 
1859 	if ((un->un_flags & UN_ULOCK) &&
1860 	    lockstatus(&un->un_lock) != LK_EXCLUSIVE) {
1861 		un->un_flags &= ~UN_ULOCK;
1862 		VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, p);
1863 	}
1864 #endif
1865 	return(error);
1866 }
1867 
1868 /*
1869  *	union_bmap:
1870  *
1871  *	There isn't much we can do.  We cannot push through to the real vnode
1872  *	to get to the underlying device because this will bypass data
1873  *	cached by the real vnode.
1874  *
1875  *	For some reason we cannot return the 'real' vnode either; it seems
1876  *	to blow up memory maps.
1877  */
1878 
1879 static int
1880 union_bmap(ap)
1881 	struct vop_bmap_args /* {
1882 		struct vnode *a_vp;
1883 		daddr_t  a_bn;
1884 		struct vnode **a_vpp;
1885 		daddr_t *a_bnp;
1886 		int *a_runp;
1887 		int *a_runb;
1888 	} */ *ap;
1889 {
1890 	return(EOPNOTSUPP);
1891 }
1892 
1893 static int
1894 union_print(ap)
1895 	struct vop_print_args /* {
1896 		struct vnode *a_vp;
1897 	} */ *ap;
1898 {
1899 	struct vnode *vp = ap->a_vp;
1900 
1901 	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1902 			vp, UPPERVP(vp), LOWERVP(vp));
1903 	if (UPPERVP(vp) != NULLVP)
1904 		vprint("union: upper", UPPERVP(vp));
1905 	if (LOWERVP(vp) != NULLVP)
1906 		vprint("union: lower", LOWERVP(vp));
1907 
1908 	return (0);
1909 }
1910 
1911 static int
1912 union_pathconf(ap)
1913 	struct vop_pathconf_args /* {
1914 		struct vnode *a_vp;
1915 		int a_name;
1916 		int *a_retval;
1917 	} */ *ap;
1918 {
1919 	int error;
1920 	struct proc *p = curproc;		/* XXX */
1921 	struct union_node *un = VTOUNION(ap->a_vp);
1922 	struct vnode *vp;
1923 
1924 	vp = union_lock_other(un, p);
1925 	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1926 
1927 	ap->a_vp = vp;
1928 	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1929 	union_unlock_other(vp, p);
1930 
1931 	return (error);
1932 }
1933 
1934 static int
1935 union_advlock(ap)
1936 	struct vop_advlock_args /* {
1937 		struct vnode *a_vp;
1938 		caddr_t  a_id;
1939 		int  a_op;
1940 		struct flock *a_fl;
1941 		int  a_flags;
1942 	} */ *ap;
1943 {
1944 	register struct vnode *ovp = OTHERVP(ap->a_vp);
1945 
1946 	ap->a_vp = ovp;
1947 	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1948 }
1949 
1950 
1951 /*
1952  * XXX - vop_strategy must be hand coded because it has no
1953  * vnode in its arguments.  This goes away with a merged
1954  * VM/buffer cache.
1955  *
1956  * YYY - and it is not coherent with anything.
1957  */
1958 static int
1959 union_strategy(ap)
1960 	struct vop_strategy_args /* {
1961 		struct vnode *a_vp;
1962 		struct buf *a_bp;
1963 	} */ *ap;
1964 {
1965 	struct buf *bp = ap->a_bp;
1966 	struct vnode *othervp = OTHERVP(bp->b_vp);
1967 
1968 #ifdef DIAGNOSTIC
1969 	if (othervp == NULLVP)
1970 		panic("union_strategy: nil vp");
1971 	if (((bp->b_flags & B_READ) == 0) &&
1972 	    (othervp == LOWERVP(bp->b_vp)))
1973 		panic("union_strategy: writing to lowervp");
1974 #endif
1975 	return (VOP_STRATEGY(othervp, bp));
1976 }
1977 
1978 /*
1979  * Global vfs data structures
1980  */
1981 vop_t **union_vnodeop_p;
1982 static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
1983 	{ &vop_default_desc,		(vop_t *) vop_defaultop },
1984 	{ &vop_abortop_desc,		(vop_t *) union_abortop },
1985 	{ &vop_access_desc,		(vop_t *) union_access },
1986 	{ &vop_advlock_desc,		(vop_t *) union_advlock },
1987 	{ &vop_bmap_desc,		(vop_t *) union_bmap },
1988 	{ &vop_close_desc,		(vop_t *) union_close },
1989 	{ &vop_create_desc,		(vop_t *) union_create },
1990 	{ &vop_fsync_desc,		(vop_t *) union_fsync },
1991 	{ &vop_getpages_desc,		(vop_t *) union_getpages },
1992 	{ &vop_putpages_desc,		(vop_t *) union_putpages },
1993 	{ &vop_getattr_desc,		(vop_t *) union_getattr },
1994 	{ &vop_inactive_desc,		(vop_t *) union_inactive },
1995 	{ &vop_ioctl_desc,		(vop_t *) union_ioctl },
1996 	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
1997 	{ &vop_lease_desc,		(vop_t *) union_lease },
1998 	{ &vop_link_desc,		(vop_t *) union_link },
1999 	{ &vop_lock_desc,		(vop_t *) union_lock },
2000 	{ &vop_lookup_desc,		(vop_t *) union_lookup },
2001 	{ &vop_mkdir_desc,		(vop_t *) union_mkdir },
2002 	{ &vop_mknod_desc,		(vop_t *) union_mknod },
2003 	{ &vop_mmap_desc,		(vop_t *) union_mmap },
2004 	{ &vop_open_desc,		(vop_t *) union_open },
2005 	{ &vop_pathconf_desc,		(vop_t *) union_pathconf },
2006 	{ &vop_poll_desc,		(vop_t *) union_poll },
2007 	{ &vop_print_desc,		(vop_t *) union_print },
2008 	{ &vop_read_desc,		(vop_t *) union_read },
2009 	{ &vop_readdir_desc,		(vop_t *) union_readdir },
2010 	{ &vop_readlink_desc,		(vop_t *) union_readlink },
2011 	{ &vop_reclaim_desc,		(vop_t *) union_reclaim },
2012 	{ &vop_remove_desc,		(vop_t *) union_remove },
2013 	{ &vop_rename_desc,		(vop_t *) union_rename },
2014 	{ &vop_revoke_desc,		(vop_t *) union_revoke },
2015 	{ &vop_rmdir_desc,		(vop_t *) union_rmdir },
2016 	{ &vop_setattr_desc,		(vop_t *) union_setattr },
2017 	{ &vop_strategy_desc,		(vop_t *) union_strategy },
2018 	{ &vop_symlink_desc,		(vop_t *) union_symlink },
2019 	{ &vop_unlock_desc,		(vop_t *) union_unlock },
2020 	{ &vop_whiteout_desc,		(vop_t *) union_whiteout },
2021 	{ &vop_write_desc,		(vop_t *) union_write },
2022 	{ NULL, NULL }
2023 };
2024 static struct vnodeopv_desc union_vnodeop_opv_desc =
2025 	{ &union_vnodeop_p, union_vnodeop_entries };
2026 
2027 VNODEOP_SET(union_vnodeop_opv_desc);
2028