1 /*
2  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3  * Copyright (c) 1992, 1993, 1994, 1995
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
38  * $FreeBSD$
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/proc.h>
44 #include <sys/fcntl.h>
45 #include <sys/stat.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <sys/mount.h>
49 #include <sys/namei.h>
50 #include <sys/malloc.h>
51 #include <sys/bio.h>
52 #include <sys/buf.h>
53 #include <sys/lock.h>
54 #include <sys/sysctl.h>
55 #include <miscfs/union/union.h>
56 
57 #include <vm/vm.h>
58 #include <vm/vnode_pager.h>
59 
60 #include <vm/vm_page.h>
61 #include <vm/vm_object.h>
62 
63 int uniondebug = 0;
64 
65 #if UDEBUG_ENABLED
66 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
67 #else
68 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
69 #endif
70 
71 static int	union_access __P((struct vop_access_args *ap));
72 static int	union_advlock __P((struct vop_advlock_args *ap));
73 static int	union_bmap __P((struct vop_bmap_args *ap));
74 static int	union_close __P((struct vop_close_args *ap));
75 static int	union_create __P((struct vop_create_args *ap));
76 static int	union_fsync __P((struct vop_fsync_args *ap));
77 static int	union_getattr __P((struct vop_getattr_args *ap));
78 static int	union_inactive __P((struct vop_inactive_args *ap));
79 static int	union_ioctl __P((struct vop_ioctl_args *ap));
80 static int	union_lease __P((struct vop_lease_args *ap));
81 static int	union_link __P((struct vop_link_args *ap));
82 static int	union_lock __P((struct vop_lock_args *ap));
83 static int	union_lookup __P((struct vop_lookup_args *ap));
84 static int	union_lookup1 __P((struct vnode *udvp, struct vnode **dvp,
85 				   struct vnode **vpp,
86 				   struct componentname *cnp));
87 static int	union_mkdir __P((struct vop_mkdir_args *ap));
88 static int	union_mknod __P((struct vop_mknod_args *ap));
89 static int	union_mmap __P((struct vop_mmap_args *ap));
90 static int	union_open __P((struct vop_open_args *ap));
91 static int	union_pathconf __P((struct vop_pathconf_args *ap));
92 static int	union_print __P((struct vop_print_args *ap));
93 static int	union_read __P((struct vop_read_args *ap));
94 static int	union_readdir __P((struct vop_readdir_args *ap));
95 static int	union_readlink __P((struct vop_readlink_args *ap));
96 static int	union_getwritemount __P((struct vop_getwritemount_args *ap));
97 static int	union_reclaim __P((struct vop_reclaim_args *ap));
98 static int	union_remove __P((struct vop_remove_args *ap));
99 static int	union_rename __P((struct vop_rename_args *ap));
100 static int	union_revoke __P((struct vop_revoke_args *ap));
101 static int	union_rmdir __P((struct vop_rmdir_args *ap));
102 static int	union_poll __P((struct vop_poll_args *ap));
103 static int	union_setattr __P((struct vop_setattr_args *ap));
104 static int	union_strategy __P((struct vop_strategy_args *ap));
105 static int	union_getpages __P((struct vop_getpages_args *ap));
106 static int	union_putpages __P((struct vop_putpages_args *ap));
107 static int	union_symlink __P((struct vop_symlink_args *ap));
108 static int	union_unlock __P((struct vop_unlock_args *ap));
109 static int	union_whiteout __P((struct vop_whiteout_args *ap));
110 static int	union_write __P((struct vop_write_args *ap));
111 
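/*
 *	union_lock_upper:
 *
 *	Return the upper layer vnode referenced and exclusively locked,
 *	or NULL if there is no upper layer.  The reference and lock are
 *	private to the caller, independent of the union_node itself.
 */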
112 static __inline
113 struct vnode *
114 union_lock_upper(struct union_node *un, struct proc *p)
115 {
116 	struct vnode *uppervp;
117 
118 	if ((uppervp = un->un_uppervp) != NULL) {
119 		VREF(uppervp);
120 		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
121 	}
122 	KASSERT((uppervp == NULL || uppervp->v_usecount > 0), ("uppervp usecount is 0"));
123 	return(uppervp);
124 }
125 
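/*
 *	union_unlock_upper:
 *
 *	Release the lock and reference obtained from union_lock_upper().
 */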
126 static __inline
127 void
128 union_unlock_upper(struct vnode *uppervp, struct proc *p)
129 {
130 	vput(uppervp);
131 }
132 
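/*
 *	union_lock_other:
 *
 *	Return the upper layer vnode if it exists, else the lower layer
 *	vnode, referenced and exclusively locked.  NULL is returned only
 *	if neither layer exists.
 */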
133 static __inline
134 struct vnode *
135 union_lock_other(struct union_node *un, struct proc *p)
136 {
137 	struct vnode *vp;
138 
139 	if (un->un_uppervp != NULL) {
140 		vp = union_lock_upper(un, p);
141 	} else if ((vp = un->un_lowervp) != NULL) {
142 		VREF(vp);
143 		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
144 	}
145 	return(vp);
146 }
147 
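/*
 *	union_unlock_other:
 *
 *	Release the lock and reference obtained from union_lock_other().
 */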
148 static __inline
149 void
150 union_unlock_other(struct vnode *vp, struct proc *p)
151 {
152 	vput(vp);
153 }
154 
155 /*
156  *	union_lookup1:
157  *
158  *	udvp	must be exclusively locked on call and will remain
159  *		exclusively locked on return.  This is the mount point
160  *		for our filesystem.
161  *
162  *	pdvp	Our base directory, locked and referenced.
163  *		The passed-in *pdvp will be dereferenced and unlocked
164  *		on return and a new *pdvp will be returned which is
165  *		locked and referenced in the same variable.
166  *
167  *	vpp	is filled in with the result if no error occurred,
168  *		locked and ref'd.
169  *
170  *		If an error is returned, *vpp is set to NULLVP.  If no
171  *		error occurs, *vpp is returned with a reference and an
172  *		exclusive lock.
173  */
174 
175 static int
176 union_lookup1(udvp, pdvp, vpp, cnp)
177 	struct vnode *udvp;
178 	struct vnode **pdvp;
179 	struct vnode **vpp;
180 	struct componentname *cnp;
181 {
182 	int error;
183 	struct proc *p = cnp->cn_proc;
184 	struct vnode *dvp = *pdvp;
185 	struct vnode *tdvp;
186 	struct mount *mp;
187 
188 	/*
189 	 * If stepping up the directory tree, check for going
190 	 * back across the mount point, in which case do what
191 	 * lookup would do by stepping back down the mount
192 	 * hierarchy.
193 	 */
194 	if (cnp->cn_flags & ISDOTDOT) {
195 		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
196 			/*
197 			 * Don't do the NOCROSSMOUNT check
198 			 * at this level.  By definition,
199 			 * union fs deals with namespaces, not
200 			 * filesystems.
201 			 */
202 			tdvp = dvp;
203 			dvp = dvp->v_mount->mnt_vnodecovered;
204 			VREF(dvp);
205 			vput(tdvp);
206 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
207 		}
208 	}
209 
210 	/*
211 	 * Set the return dvp to be the upperdvp 'parent directory'.
212 	 */
213 	*pdvp = dvp;
214 
215 	/*
216 	 * If the VOP_LOOKUP call generates an error, tdvp is invalid and no
217 	 * changes will have been made to dvp, so we are set to return.
218 	 */
219 
220 	error = VOP_LOOKUP(dvp, &tdvp, cnp);
221 	if (error) {
222 		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
223 		*vpp = NULL;
224 		return (error);
225 	}
226 
227 	/*
228 	 * The parent directory will have been unlocked, unless lookup
229 	 * found the last component or if dvp == tdvp (tdvp must be locked).
230 	 *
231 	 * We want our dvp to remain locked and ref'd.  We also want tdvp
232 	 * to remain locked and ref'd.
233 	 */
234 	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));
235 
236 	if (dvp != tdvp && (cnp->cn_flags & ISLASTCN) == 0)
237 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
238 
239 	/*
240 	 * Lastly check if the current node is a mount point in
241 	 * which case walk up the mount hierarchy making sure not to
242 	 * bump into the root of the mount tree (ie. dvp != udvp).
243 	 *
244 	 * We use dvp as a temporary variable here, it is no longer related
245 	 * to the dvp above.  However, we have to ensure that both *pdvp and
246 	 * tdvp are locked on return.
247 	 */
248 
249 	dvp = tdvp;
250 	while (
251 	    dvp != udvp &&
252 	    (dvp->v_type == VDIR) &&
253 	    (mp = dvp->v_mountedhere)
254 	) {
255 		int relock_pdvp = 0;
256 
257 		if (vfs_busy(mp, 0, 0, p))
258 			continue;
259 
260 		if (dvp == *pdvp)
261 			relock_pdvp = 1;
262 		vput(dvp);
263 		dvp = NULL;
264 		error = VFS_ROOT(mp, &dvp);
265 
266 		vfs_unbusy(mp, p);
267 
268 		if (relock_pdvp)
269 			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, p);
270 
271 		if (error) {
272 			*vpp = NULL;
273 			return (error);
274 		}
275 	}
276 	*vpp = dvp;
277 	return (0);
278 }
279 
280 static int
281 union_lookup(ap)
282 	struct vop_lookup_args /* {
283 		struct vnodeop_desc *a_desc;
284 		struct vnode *a_dvp;
285 		struct vnode **a_vpp;
286 		struct componentname *a_cnp;
287 	} */ *ap;
288 {
289 	int error;
290 	int uerror, lerror;
291 	struct vnode *uppervp, *lowervp;
292 	struct vnode *upperdvp, *lowerdvp;
293 	struct vnode *dvp = ap->a_dvp;		/* starting dir */
294 	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
295 	struct componentname *cnp = ap->a_cnp;
296 	struct proc *p = cnp->cn_proc;
297 	int lockparent = cnp->cn_flags & LOCKPARENT;
298 	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
299 	struct ucred *saved_cred = NULL;
300 	int iswhiteout;
301 	struct vattr va;
302 
303 	*ap->a_vpp = NULLVP;
304 
305 	/*
306 	 * Disallow write attempts to the filesystem mounted read-only.
307 	 */
308 	if ((cnp->cn_flags & ISLASTCN) &&
309 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
310 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
311 		return (EROFS);
312 	}
313 
314 	/*
315 	 * For any lookups we do, always return with the parent locked.
316 	 */
317 	cnp->cn_flags |= LOCKPARENT;
318 
319 	lowerdvp = dun->un_lowervp;
320 	uppervp = NULLVP;
321 	lowervp = NULLVP;
322 	iswhiteout = 0;
323 
324 	uerror = ENOENT;
325 	lerror = ENOENT;
326 
327 	/*
328 	 * Get a private lock on upperdvp and a reference, effectively
329 	 * taking it out of the union_node's control.
330 	 *
331 	 * We must lock upperdvp while holding our lock on dvp
332 	 * to avoid a deadlock.
333 	 */
334 	upperdvp = union_lock_upper(dun, p);
335 
336 	/*
337 	 * do the lookup in the upper level.
338 	 * if that level consumes additional pathnames,
339 	 * then assume that something special is going
340 	 * on and just return that vnode.
341 	 */
342 	if (upperdvp != NULLVP) {
343 		/*
344 		 * We do not have to worry about the DOTDOT case, we've
345 		 * already unlocked dvp.
346 		 */
347 		UDEBUG(("A %p\n", upperdvp));
348 
349 		/*
350 		 * Do the lookup.   We must supply a locked and referenced
351 		 * upperdvp to the function and will get a new locked and
352 		 * referenced upperdvp back with the old having been
353 		 * dereferenced.
354 		 *
355 		 * If an error is returned, uppervp will be NULLVP.  If no
356 		 * error occurs, uppervp will be the locked and referenced
357 		 * return vnode or possibly NULL, depending on what is being
358 		 * requested.  It is possible that the returned uppervp
359 		 * will be the same as upperdvp.
360 		 */
361 		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
362 		UDEBUG((
363 		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
364 		    uerror,
365 		    upperdvp,
366 		    upperdvp->v_usecount,
367 		    VOP_ISLOCKED(upperdvp, NULL),
368 		    uppervp,
369 		    (uppervp ? uppervp->v_usecount : -99),
370 		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
371 		));
372 
373 		/*
374 		 * Disallow write attempts to the filesystem mounted read-only.
375 		 */
376 		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
377 		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
378 		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
379 			error = EROFS;
380 			goto out;
381 		}
382 
383 		/*
384 		 * Special case.  If cn_consume != 0, skip out.  The result
385 		 * of the lookup is transferred to our return variable.  If
386 		 * an error occurred we have to throw away the results.
387 		 */
388 
389 		if (cnp->cn_consume != 0) {
390 			if ((error = uerror) == 0) {
391 				*ap->a_vpp = uppervp;
392 				uppervp = NULL;
393 			}
394 			goto out;
395 		}
396 
397 		/*
398 		 * Calculate whiteout, fall through
399 		 */
400 
401 		if (uerror == ENOENT || uerror == EJUSTRETURN) {
402 			if (cnp->cn_flags & ISWHITEOUT) {
403 				iswhiteout = 1;
404 			} else if (lowerdvp != NULLVP) {
405 				int terror;
406 
407 				terror = VOP_GETATTR(upperdvp, &va,
408 					cnp->cn_cred, cnp->cn_proc);
409 				if (terror == 0 && (va.va_flags & OPAQUE))
410 					iswhiteout = 1;
411 			}
412 		}
413 	}
414 
415 	/*
416 	 * in a similar way to the upper layer, do the lookup
417 	 * in the lower layer.   this time, if there is some
418 	 * component magic going on, then vput whatever we got
419 	 * back from the upper layer and return the lower vnode
420 	 * instead.
421 	 */
422 
423 	if (lowerdvp != NULLVP && !iswhiteout) {
424 		int nameiop;
425 
426 		UDEBUG(("B %p\n", lowerdvp));
427 
428 		/*
429 		 * Force only LOOKUPs on the lower node, since
430 		 * we won't be making changes to it anyway.
431 		 */
432 		nameiop = cnp->cn_nameiop;
433 		cnp->cn_nameiop = LOOKUP;
434 		if (um->um_op == UNMNT_BELOW) {
435 			saved_cred = cnp->cn_cred;
436 			cnp->cn_cred = um->um_cred;
437 		}
438 
439 		/*
440 		 * We shouldn't have to worry about locking interactions
441 		 * between the lower layer and our union layer (w.r.t.
442 		 * `..' processing) because we don't futz with lowervp
443 		 * locks in the union-node instantiation code path.
444 		 *
445 		 * union_lookup1() requires lowervp to be locked on entry,
446 		 * and it will be unlocked on return.  The ref count will
447 		 * not change.  On return lowervp doesn't represent anything
448 		 * to us so we NULL it out.
449 		 */
450 		VREF(lowerdvp);
451 		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p);
452 		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
453 		if (lowerdvp == lowervp)
454 			vrele(lowerdvp);
455 		else
456 			vput(lowerdvp);
457 		lowerdvp = NULL;	/* lowerdvp invalid after vput */
458 
459 		if (um->um_op == UNMNT_BELOW)
460 			cnp->cn_cred = saved_cred;
461 		cnp->cn_nameiop = nameiop;
462 
463 		if (cnp->cn_consume != 0 || lerror == EACCES) {
464 			if ((error = lerror) == 0) {
465 				*ap->a_vpp = lowervp;
466 				lowervp = NULL;
467 			}
468 			goto out;
469 		}
470 	} else {
471 		UDEBUG(("C %p\n", lowerdvp));
472 		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
473 			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
474 				VREF(lowervp);
475 				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p);
476 				lerror = 0;
477 			}
478 		}
479 	}
480 
481 	/*
482 	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
483 	 *
484 	 * 1. If both layers returned an error, select the upper layer.
485 	 *
486 	 * 2. If the upper layer failed and the bottom layer succeeded,
487 	 *    two subcases occur:
488 	 *
489 	 *	a.  The bottom vnode is not a directory, in which case
490 	 *	    just return a new union vnode referencing an
491 	 *	    empty top layer and the existing bottom layer.
492 	 *
493 	 *	b.  The bottom vnode is a directory, in which case
494 	 *	    create a new directory in the top layer and
495 	 *	    fall through to case 3.
496 	 *
497 	 * 3. If the top layer succeeded then return a new union
498 	 *    vnode referencing whatever the new top layer and
499 	 *    whatever the bottom layer returned.
500 	 */
501 
502 	/* case 1. */
503 	if ((uerror != 0) && (lerror != 0)) {
504 		error = uerror;
505 		goto out;
506 	}
507 
508 	/* case 2. */
509 	if (uerror != 0 /* && (lerror == 0) */ ) {
510 		if (lowervp->v_type == VDIR) { /* case 2b. */
511 			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
512 			/*
513 			 * oops, uppervp has a problem, we may have to shadow.
514 			 */
515 			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
516 			if (uerror) {
517 				error = uerror;
518 				goto out;
519 			}
520 		}
521 	}
522 
523 	/*
524 	 * Must call union_allocvp with both the upper and lower vnodes
525 	 * referenced and the upper vnode locked.   ap->a_vpp is returned
526 	 * referenced and locked.  lowervp, uppervp, and upperdvp are
527 	 * absorbed by union_allocvp() whether it succeeds or fails.
528 	 *
529 	 * upperdvp is the parent directory of uppervp, which may be
530 	 * different, depending on the path, from dun->un_uppervp.  That's
531 	 * why it is a separate argument.  Note that it must be unlocked.
532 	 *
533 	 * dvp must be locked on entry to the call and will be locked on
534 	 * return.
535 	 */
536 
537 	if (uppervp && uppervp != upperdvp)
538 		VOP_UNLOCK(uppervp, 0, p);
539 	if (lowervp)
540 		VOP_UNLOCK(lowervp, 0, p);
541 	if (upperdvp)
542 		VOP_UNLOCK(upperdvp, 0, p);
543 
544 	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
545 			      uppervp, lowervp, 1);
546 
547 	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? ((*ap->a_vpp)->v_usecount) : -99));
548 
549 	uppervp = NULL;
550 	upperdvp = NULL;
551 	lowervp = NULL;
552 
553 	/*
554 	 *	Termination Code
555 	 *
556 	 *	- put away any extra junk lying around.  Note that lowervp
557 	 *	  (if not NULL) will never be the same as *ap->a_vpp and
558 	 *	  neither will uppervp, because when we set that state we
559 	 *	  NULL-out lowervp or uppervp.  On the other hand, upperdvp
560 	 *	  may match uppervp or *ap->a_vpp.
561 	 *
562 	 *	- relock/unlock dvp if appropriate.
563 	 */
564 
565 out:
566 	if (upperdvp) {
567 		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
568 			vrele(upperdvp);
569 		else
570 			vput(upperdvp);
571 	}
572 
573 	if (uppervp)
574 		vput(uppervp);
575 
576 	if (lowervp)
577 		vput(lowervp);
578 
579 	/*
580 	 * Restore LOCKPARENT state
581 	 */
582 
583 	if (!lockparent)
584 		cnp->cn_flags &= ~LOCKPARENT;
585 
586 	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
587 		((*ap->a_vpp) ? (*ap->a_vpp)->v_usecount : -99),
588 		lowervp, uppervp));
589 
590 	/*
591 	 * dvp lock state: determine whether to relock dvp.  dvp is expected
592 	 * to be locked on return if:
593 	 *
594 	 *	- there was an error (other than EJUSTRETURN), or
595 	 *	- we hit the last component and lockparent is true.
596 	 *
597 	 * This does not count the possibility that *ap->a_vpp == dvp (in
598 	 * which case dvp is locked anyway).  Note that *ap->a_vpp == dvp
599 	 * only if no error occurred.
600 	 */
601 
602 	if (*ap->a_vpp != dvp) {
603 		if ((error == 0 || error == EJUSTRETURN) &&
604 		    (!lockparent || (cnp->cn_flags & ISLASTCN) == 0)) {
605 			VOP_UNLOCK(dvp, 0, p);
606 		}
607 	}
608 
609 	/*
610 	 * Diagnostics
611 	 */
612 
613 #ifdef DIAGNOSTIC
614 	if (cnp->cn_namelen == 1 &&
615 	    cnp->cn_nameptr[0] == '.' &&
616 	    *ap->a_vpp != dvp) {
617 		panic("union_lookup returning . (%p) not same as startdir (%p)", *ap->a_vpp, dvp);
618 	}
619 #endif
620 
621 	return (error);
622 }
623 
624 /*
625  * 	union_create:
626  *
627  * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
628  * locked if no error occurs, otherwise it is garbage.
629  */
630 
631 static int
632 union_create(ap)
633 	struct vop_create_args /* {
634 		struct vnode *a_dvp;
635 		struct vnode **a_vpp;
636 		struct componentname *a_cnp;
637 		struct vattr *a_vap;
638 	} */ *ap;
639 {
640 	struct union_node *dun = VTOUNION(ap->a_dvp);
641 	struct componentname *cnp = ap->a_cnp;
642 	struct proc *p = cnp->cn_proc;
643 	struct vnode *dvp;
644 	int error = EROFS;
645 
646 	if ((dvp = union_lock_upper(dun, p)) != NULL) {
647 		struct vnode *vp;
648 		struct mount *mp;
649 
650 		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
651 		if (error == 0) {
652 			mp = ap->a_dvp->v_mount;
653 			VOP_UNLOCK(vp, 0, p);
654 			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_usecount));
655 			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
656 				cnp, vp, NULLVP, 1);
657 			UDEBUG(("ALLOCVP-1B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
658 		}
659 		union_unlock_upper(dvp, p);
660 	}
661 	return (error);
662 }
663 
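/*
 *	union_whiteout:
 *
 *	a_dvp is locked on entry and remains locked on return.  The
 *	whiteout is created in the upper layer; if no upper layer
 *	exists, EOPNOTSUPP is returned.
 */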
664 static int
665 union_whiteout(ap)
666 	struct vop_whiteout_args /* {
667 		struct vnode *a_dvp;
668 		struct componentname *a_cnp;
669 		int a_flags;
670 	} */ *ap;
671 {
672 	struct union_node *un = VTOUNION(ap->a_dvp);
673 	struct componentname *cnp = ap->a_cnp;
674 	struct vnode *uppervp;
675 	int error = EOPNOTSUPP;
676 
677 	if ((uppervp = union_lock_upper(un, cnp->cn_proc)) != NULLVP) {
678 		error = VOP_WHITEOUT(uppervp, cnp, ap->a_flags);
679 		union_unlock_upper(uppervp, cnp->cn_proc);
680 	}
681 	return(error);
682 }
683 
684 /*
685  * 	union_mknod:
686  *
687  *	a_dvp is locked on entry and should remain locked on return.
688  *	a_vpp is garbage whether an error occurs or not.
689  */
690 
691 static int
692 union_mknod(ap)
693 	struct vop_mknod_args /* {
694 		struct vnode *a_dvp;
695 		struct vnode **a_vpp;
696 		struct componentname *a_cnp;
697 		struct vattr *a_vap;
698 	} */ *ap;
699 {
700 	struct union_node *dun = VTOUNION(ap->a_dvp);
701 	struct componentname *cnp = ap->a_cnp;
702 	struct vnode *dvp;
703 	int error = EROFS;
704 
705 	if ((dvp = union_lock_upper(dun, cnp->cn_proc)) != NULL) {
706 		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
707 		union_unlock_upper(dvp, cnp->cn_proc);
708 	}
709 	return (error);
710 }
711 
712 /*
713  *	union_open:
714  *
715  *	run open VOP.  When opening the underlying vnode we have to mimic
716  *	vn_open.  What we *really* need to do to avoid screwups if the
717  *	open semantics change is to call vn_open().  For example, ufs blows
718  *	up if you open a file but do not vmio it prior to writing.
719  */
720 
721 static int
722 union_open(ap)
723 	struct vop_open_args /* {
724 		struct vnodeop_desc *a_desc;
725 		struct vnode *a_vp;
726 		int a_mode;
727 		struct ucred *a_cred;
728 		struct proc *a_p;
729 	} */ *ap;
730 {
731 	struct union_node *un = VTOUNION(ap->a_vp);
732 	struct vnode *tvp;
733 	int mode = ap->a_mode;
734 	struct ucred *cred = ap->a_cred;
735 	struct proc *p = ap->a_p;
736 	int error = 0;
737 	int tvpisupper = 1;
738 
739 	/*
740 	 * If there is an existing upper vp then simply open that.
741 	 * The upper vp takes precedence over the lower vp.  When opening
742 	 * a lower vp for writing copy it to the uppervp and then open the
743 	 * uppervp.
744 	 *
745 	 * At the end of this section tvp will be left locked.
746 	 */
747 	if ((tvp = union_lock_upper(un, p)) == NULLVP) {
748 		/*
749 		 * If the lower vnode is being opened for writing, then
750 		 * copy the file contents to the upper vnode and open that,
751 		 * otherwise can simply open the lower vnode.
752 		 */
753 		tvp = un->un_lowervp;
754 		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
755 			int docopy = !(mode & O_TRUNC);
756 			error = union_copyup(un, docopy, cred, p);
757 			tvp = union_lock_upper(un, p);
758 		} else {
759 			un->un_openl++;
760 			VREF(tvp);
761 			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
762 			tvpisupper = 0;
763 		}
764 	}
765 
766 	/*
767 	 * We are holding the correct vnode, open it
768 	 */
769 
770 	if (error == 0)
771 		error = VOP_OPEN(tvp, mode, cred, p);
772 
773 	/*
774 	 * Absolutely necessary or UFS will blow up.
775 	 */
776 	if (error == 0 && vn_canvmio(tvp) == TRUE) {
777 		error = vfs_object_create(tvp, p, cred);
778 	}
779 
780 	/*
781 	 * Release any locks held
782 	 */
783 	if (tvpisupper) {
784 		if (tvp)
785 			union_unlock_upper(tvp, p);
786 	} else {
787 		vput(tvp);
788 	}
789 	return (error);
790 }
791 
792 /*
793  *	union_close:
794  *
795  *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
796  *	the case we do not change it.
797  */
798 
799 static int
800 union_close(ap)
801 	struct vop_close_args /* {
802 		struct vnode *a_vp;
803 		int  a_fflag;
804 		struct ucred *a_cred;
805 		struct proc *a_p;
806 	} */ *ap;
807 {
808 	struct union_node *un = VTOUNION(ap->a_vp);
809 	struct vnode *vp;
810 
811 	if ((vp = un->un_uppervp) == NULLVP) {
812 #ifdef UNION_DIAGNOSTIC
813 		if (un->un_openl <= 0)
814 			panic("union: un_openl cnt");
815 #endif
816 		--un->un_openl;
817 		vp = un->un_lowervp;
818 	}
819 	ap->a_vp = vp;
820 	return (VCALL(vp, VOFFSET(vop_close), ap));
821 }
822 
823 /*
824  * Check access permission on the union vnode.
825  * The access check being enforced is to check
826  * against both the underlying vnode, and any
827  * copied vnode.  This ensures that no additional
828  * file permissions are given away simply because
829  * the user caused an implicit file copy.
830  */
831 static int
832 union_access(ap)
833 	struct vop_access_args /* {
834 		struct vnodeop_desc *a_desc;
835 		struct vnode *a_vp;
836 		int a_mode;
837 		struct ucred *a_cred;
838 		struct proc *a_p;
839 	} */ *ap;
840 {
841 	struct union_node *un = VTOUNION(ap->a_vp);
842 	struct proc *p = ap->a_p;
843 	int error = EACCES;
844 	struct vnode *vp;
845 
846 	/*
847 	 * Disallow write attempts on filesystems mounted read-only.
848 	 */
849 	if ((ap->a_mode & VWRITE) &&
850 	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
851 		switch (ap->a_vp->v_type) {
852 		case VREG:
853 		case VDIR:
854 		case VLNK:
855 			return (EROFS);
856 		default:
857 			break;
858 		}
859 	}
860 
861 	if ((vp = union_lock_upper(un, p)) != NULLVP) {
862 		ap->a_vp = vp;
863 		error = VCALL(vp, VOFFSET(vop_access), ap);
864 		union_unlock_upper(vp, p);
865 		return(error);
866 	}
867 
868 	if ((vp = un->un_lowervp) != NULLVP) {
869 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
870 		ap->a_vp = vp;
871 
872 		/*
873 		 * Remove VWRITE from a_mode if our mount point is RW, because
874 		 * we want to allow writes and lowervp may be read-only.
875 		 */
876 		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
877 			ap->a_mode &= ~VWRITE;
878 
879 		error = VCALL(vp, VOFFSET(vop_access), ap);
880 		if (error == 0) {
881 			struct union_mount *um;
882 
883 			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);
884 
885 			if (um->um_op == UNMNT_BELOW) {
886 				ap->a_cred = um->um_cred;
887 				error = VCALL(vp, VOFFSET(vop_access), ap);
888 			}
889 		}
890 		VOP_UNLOCK(vp, 0, p);
891 	}
892 	return(error);
893 }
894 
895 /*
896  * We handle getattr only to change the fsid and
897  * track object sizes
898  *
899  * It's not clear whether VOP_GETATTR is to be
900  * called with the vnode locked or not.  stat() calls
901  * it with (vp) locked, and fstat calls it with
902  * (vp) unlocked.
903  *
904  * Because of this we cannot use our normal locking functions
905  * if we do not intend to lock the main a_vp node.  At the moment
906  * we are running without any specific locking at all.  Be aware
907  * that care must be taken if locking is added
908  * to this function.
909  */
910 
911 static int
912 union_getattr(ap)
913 	struct vop_getattr_args /* {
914 		struct vnode *a_vp;
915 		struct vattr *a_vap;
916 		struct ucred *a_cred;
917 		struct proc *a_p;
918 	} */ *ap;
919 {
920 	int error;
921 	struct union_node *un = VTOUNION(ap->a_vp);
922 	struct vnode *vp;
923 	struct vattr *vap;
924 	struct vattr va;
925 
926 	/*
927 	 * Some programs walk the filesystem hierarchy by counting
928 	 * links to directories to avoid stat'ing all the time.
929 	 * This means the link count on directories needs to be "correct".
930 	 * The only way to do that is to call getattr on both layers
931 	 * and fix up the link count.  The link count will not necessarily
932 	 * be accurate but will be large enough to defeat the tree walkers.
933 	 */
934 
935 	vap = ap->a_vap;
936 
937 	if ((vp = un->un_uppervp) != NULLVP) {
938 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
939 		if (error)
940 			return (error);
941 		/* XXX isn't this dangerous without a lock? */
942 		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
943 	}
944 
945 	if (vp == NULLVP) {
946 		vp = un->un_lowervp;
947 	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
948 		vp = un->un_lowervp;
949 		vap = &va;
950 	} else {
951 		vp = NULLVP;
952 	}
953 
954 	if (vp != NULLVP) {
955 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
956 		if (error)
957 			return (error);
958 		/* XXX isn't this dangerous without a lock? */
959 		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
960 	}
961 
962 	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
963 		ap->a_vap->va_nlink += vap->va_nlink;
964 	return (0);
965 }
966 
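/*
 *	union_setattr:
 *
 *	Set the attributes on the upper layer, first copying a regular
 *	file up from the lower layer if necessary (the O_TRUNC/O_CREAT
 *	case).  EROFS is returned if no upper layer can be obtained.
 */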
967 static int
968 union_setattr(ap)
969 	struct vop_setattr_args /* {
970 		struct vnode *a_vp;
971 		struct vattr *a_vap;
972 		struct ucred *a_cred;
973 		struct proc *a_p;
974 	} */ *ap;
975 {
976 	struct union_node *un = VTOUNION(ap->a_vp);
977 	struct proc *p = ap->a_p;
978 	struct vattr *vap = ap->a_vap;
979 	struct vnode *uppervp;
980 	int error;
981 
982 	/*
983 	 * Disallow write attempts on filesystems mounted read-only.
984 	 */
985 	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
986 	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
987 	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
988 	     vap->va_mtime.tv_sec != VNOVAL ||
989 	     vap->va_mode != (mode_t)VNOVAL)) {
990 		return (EROFS);
991 	}
992 
993 	/*
994 	 * Handle case of truncating lower object to zero size,
995 	 * by creating a zero length upper object.  This is to
996 	 * handle the case of open with O_TRUNC and O_CREAT.
997 	 */
998 	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
999 		error = union_copyup(un, (ap->a_vap->va_size != 0),
1000 			    ap->a_cred, ap->a_p);
1001 		if (error)
1002 			return (error);
1003 	}
1004 
1005 	/*
1006 	 * Try to set attributes in upper layer,
1007 	 * otherwise return read-only filesystem error.
1008 	 */
1009 	error = EROFS;
1010 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1011 		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
1012 					ap->a_cred, ap->a_p);
1013 		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
1014 			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
1015 		union_unlock_upper(uppervp, p);
1016 	}
1017 	return (error);
1018 }
1019 
1020 /*
1021  *	union_getpages:
1022  */
1023 
1024 static int
1025 union_getpages(struct vop_getpages_args *ap)
1026 {
1027 	int r;
1028 
1029 	r = vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
1030 		ap->a_count, ap->a_reqpage);
1031 	return(r);
1032 }
1033 
1034 /*
1035  *	union_putpages:
1036  */
1037 
1038 static int
1039 union_putpages(struct vop_putpages_args *ap)
1040 {
1041 	int r;
1042 
1043 	r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
1044 		ap->a_sync, ap->a_rtvals);
1045 	return(r);
1046 }
1047 
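/*
 *	union_read:
 *
 *	Read from the backing vnode (the upper layer if it exists, else
 *	the lower layer), maintaining VM coherency first and noting any
 *	size change revealed by the resulting uio offset.
 */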
1048 static int
1049 union_read(ap)
1050 	struct vop_read_args /* {
1051 		struct vnode *a_vp;
1052 		struct uio *a_uio;
1053 		int  a_ioflag;
1054 		struct ucred *a_cred;
1055 	} */ *ap;
1056 {
1057 	struct union_node *un = VTOUNION(ap->a_vp);
1058 	struct proc *p = ap->a_uio->uio_procp;
1059 	struct vnode *uvp;
1060 	int error;
1061 
1062 	uvp = union_lock_other(un, p);
1063 	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
1064 
1065 	if (ap->a_vp->v_flag & VOBJBUF)
1066 		union_vm_coherency(ap->a_vp, ap->a_uio, 0);
1067 
1068 	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1069 	union_unlock_other(uvp, p);
1070 
1071 	/*
1072 	 * XXX
1073 	 * perhaps the size of the underlying object has changed under
1074 	 * our feet.  take advantage of the offset information present
1075 	 * in the uio structure.
1076 	 */
1077 	if (error == 0) {
1078 		struct union_node *un = VTOUNION(ap->a_vp);
1079 		off_t cur = ap->a_uio->uio_offset;
1080 
1081 		if (uvp == un->un_uppervp) {
1082 			if (cur > un->un_uppersz)
1083 				union_newsize(ap->a_vp, cur, VNOVAL);
1084 		} else {
1085 			if (cur > un->un_lowersz)
1086 				union_newsize(ap->a_vp, VNOVAL, cur);
1087 		}
1088 	}
1089 	return (error);
1090 }
1091 
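/*
 *	union_write:
 *
 *	Writes always go to the upper layer; a missing upper layer is a
 *	panic, since a node being written should already have been
 *	copied up by open or setattr.
 */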
1092 static int
1093 union_write(ap)
1094 	struct vop_write_args /* {
1095 		struct vnode *a_vp;
1096 		struct uio *a_uio;
1097 		int  a_ioflag;
1098 		struct ucred *a_cred;
1099 	} */ *ap;
1100 {
1101 	struct union_node *un = VTOUNION(ap->a_vp);
1102 	struct proc *p = ap->a_uio->uio_procp;
1103 	struct vnode *uppervp;
1104 	int error;
1105 
1106 	if ((uppervp = union_lock_upper(un, p)) == NULLVP)
1107 		panic("union: missing upper layer in write");
1108 
1109 	/*
1110 	 * Since our VM pages are associated with our vnode rather than
1111 	 * the real vnode, and since we do not run our reads and writes
1112 	 * through our own VM cache, we have a VM/VFS coherency problem.
1113 	 * We solve it by invalidating or flushing the associated VM
1114 	 * pages prior to allowing a normal read or write to occur.
1115 	 *
1116 	 * VM-backed writes (UIO_NOCOPY) have to be converted to normal
1117 	 * writes because we are not cache-coherent.  Normal writes need
1118 	 * to be made coherent with our VM-backing store, which we do by
1119 	 * first flushing any dirty VM pages associated with the write
1120 	 * range, and then destroying any clean VM pages associated with
1121 	 * the write range.
1122 	 */
1123 
1124 	if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
1125 		ap->a_uio->uio_segflg = UIO_SYSSPACE;
1126 	} else if (ap->a_vp->v_flag & VOBJBUF) {
1127 		union_vm_coherency(ap->a_vp, ap->a_uio, 1);
1128 	}
1129 
1130 	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1131 
1132 	/*
1133 	 * the size of the underlying object may be changed by the
1134 	 * write.
1135 	 */
1136 	if (error == 0) {
1137 		off_t cur = ap->a_uio->uio_offset;
1138 
1139 		if (cur > un->un_uppersz)
1140 			union_newsize(ap->a_vp, cur, VNOVAL);
1141 	}
1142 	union_unlock_upper(uppervp, p);
1143 	return (error);
1144 }
1145 
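/*
 * union_lease, union_ioctl, union_poll, and union_mmap are simple
 * pass-throughs: the call is re-issued against OTHERVP(), i.e. the
 * upper layer vnode if one exists, otherwise the lower layer vnode.
 */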
1146 static int
1147 union_lease(ap)
1148 	struct vop_lease_args /* {
1149 		struct vnode *a_vp;
1150 		struct proc *a_p;
1151 		struct ucred *a_cred;
1152 		int a_flag;
1153 	} */ *ap;
1154 {
1155 	struct vnode *ovp = OTHERVP(ap->a_vp);
1156 
1157 	ap->a_vp = ovp;
1158 	return (VCALL(ovp, VOFFSET(vop_lease), ap));
1159 }
1160 
1161 static int
1162 union_ioctl(ap)
1163 	struct vop_ioctl_args /* {
1164 		struct vnode *a_vp;
1165 		int  a_command;
1166 		caddr_t  a_data;
1167 		int  a_fflag;
1168 		struct ucred *a_cred;
1169 		struct proc *a_p;
1170 	} */ *ap;
1171 {
1172 	struct vnode *ovp = OTHERVP(ap->a_vp);
1173 
1174 	ap->a_vp = ovp;
1175 	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1176 }
1177 
1178 static int
1179 union_poll(ap)
1180 	struct vop_poll_args /* {
1181 		struct vnode *a_vp;
1182 		int  a_events;
1183 		struct ucred *a_cred;
1184 		struct proc *a_p;
1185 	} */ *ap;
1186 {
1187 	struct vnode *ovp = OTHERVP(ap->a_vp);
1188 
1189 	ap->a_vp = ovp;
1190 	return (VCALL(ovp, VOFFSET(vop_poll), ap));
1191 }
1192 
1193 static int
1194 union_revoke(ap)
1195 	struct vop_revoke_args /* {
1196 		struct vnode *a_vp;
1197 		int a_flags;
1198 		struct proc *a_p;
1199 	} */ *ap;
1200 {
1201 	struct vnode *vp = ap->a_vp;
1202 
1203 	if (UPPERVP(vp))
1204 		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1205 	if (LOWERVP(vp))
1206 		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1207 	vgone(vp);
1208 	return (0);
1209 }
1210 
1211 static int
1212 union_mmap(ap)
1213 	struct vop_mmap_args /* {
1214 		struct vnode *a_vp;
1215 		int  a_fflags;
1216 		struct ucred *a_cred;
1217 		struct proc *a_p;
1218 	} */ *ap;
1219 {
1220 	struct vnode *ovp = OTHERVP(ap->a_vp);
1221 
1222 	ap->a_vp = ovp;
1223 	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
1224 }
1225 
1226 static int
1227 union_fsync(ap)
1228 	struct vop_fsync_args /* {
1229 		struct vnode *a_vp;
1230 		struct ucred *a_cred;
1231 		int  a_waitfor;
1232 		struct proc *a_p;
1233 	} */ *ap;
1234 {
1235 	int error = 0;
1236 	struct proc *p = ap->a_p;
1237 	struct vnode *targetvp;
1238 	struct union_node *un = VTOUNION(ap->a_vp);
1239 
1240 	if ((targetvp = union_lock_other(un, p)) != NULLVP) {
1241 		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p);
1242 		union_unlock_other(targetvp, p);
1243 	}
1244 
1245 	return (error);
1246 }
1247 
1248 /*
1249  *	union_remove:
1250  *
1251  *	Remove the specified cnp.  The dvp and vp are passed to us locked
1252  *	and must remain locked on return.
1253  */
1254 
1255 static int
1256 union_remove(ap)
1257 	struct vop_remove_args /* {
1258 		struct vnode *a_dvp;
1259 		struct vnode *a_vp;
1260 		struct componentname *a_cnp;
1261 	} */ *ap;
1262 {
1263 	struct union_node *dun = VTOUNION(ap->a_dvp);
1264 	struct union_node *un = VTOUNION(ap->a_vp);
1265 	struct componentname *cnp = ap->a_cnp;
1266 	struct proc *p = cnp->cn_proc;
1267 	struct vnode *uppervp;
1268 	struct vnode *upperdvp;
1269 	int error;
1270 
1271 	if ((upperdvp = union_lock_upper(dun, p)) == NULLVP)
1272 		panic("union remove: null upper vnode");
1273 
1274 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1275 		if (union_dowhiteout(un, cnp->cn_cred, p))
1276 			cnp->cn_flags |= DOWHITEOUT;
1277 		error = VOP_REMOVE(upperdvp, uppervp, cnp);
1278 #if 0
1279 		/* XXX */
1280 		if (!error)
1281 			union_removed_upper(un);
1282 #endif
1283 		union_unlock_upper(uppervp, p);
1284 	} else {
1285 		error = union_mkwhiteout(
1286 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1287 			    upperdvp, ap->a_cnp, un->un_path);
1288 	}
1289 	union_unlock_upper(upperdvp, p);
1290 	return (error);
1291 }
1292 
1293 /*
1294  *	union_link:
1295  *
1296  *	tdvp will be locked on entry, vp will not be locked on entry.
1297  *	tdvp should remain locked on return and vp should remain unlocked
1298  *	on return.
1299  */
1300 
1301 static int
1302 union_link(ap)
1303 	struct vop_link_args /* {
1304 		struct vnode *a_tdvp;
1305 		struct vnode *a_vp;
1306 		struct componentname *a_cnp;
1307 	} */ *ap;
1308 {
1309 	struct componentname *cnp = ap->a_cnp;
1310 	struct proc *p = cnp->cn_proc;
1311 	struct union_node *dun = VTOUNION(ap->a_tdvp);
1312 	struct vnode *vp;
1313 	struct vnode *tdvp;
1314 	int error = 0;
1315 
1316 	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
1317 		vp = ap->a_vp;
1318 	} else {
1319 		struct union_node *tun = VTOUNION(ap->a_vp);
1320 
1321 		if (tun->un_uppervp == NULLVP) {
1322 			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
1323 #if 0
1324 			if (dun->un_uppervp == tun->un_dirvp) {
1325 				if (dun->un_flags & UN_ULOCK) {
1326 					dun->un_flags &= ~UN_ULOCK;
1327 					VOP_UNLOCK(dun->un_uppervp, 0, p);
1328 				}
1329 			}
1330 #endif
1331 			error = union_copyup(tun, 1, cnp->cn_cred, p);
1332 #if 0
1333 			if (dun->un_uppervp == tun->un_dirvp) {
1334 				vn_lock(dun->un_uppervp,
1335 					    LK_EXCLUSIVE | LK_RETRY, p);
1336 				dun->un_flags |= UN_ULOCK;
1337 			}
1338 #endif
1339 			VOP_UNLOCK(ap->a_vp, 0, p);
1340 		}
1341 		vp = tun->un_uppervp;
1342 	}
1343 
1344 	if (error)
1345 		return (error);
1346 
1347 	/*
1348 	 * Make sure upper is locked, then unlock the union directory we were
1349 	 * called with to avoid a deadlock while we are calling VOP_LINK on
1350 	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
1351 	 * is expected to be locked on return.
1352 	 */
1353 
1354 	if ((tdvp = union_lock_upper(dun, p)) == NULLVP)
1355 		return (EROFS);
1356 
1357 	VOP_UNLOCK(ap->a_tdvp, 0, p);		/* unlock calling node */
1358 	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */
1359 
1360 	/*
1361 	 * We have to unlock tdvp prior to relocking our calling node in
1362 	 * order to avoid a deadlock.
1363 	 */
1364 	union_unlock_upper(tdvp, p);
1365 	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, p);
1366 	return (error);
1367 }
1368 
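/*
 *	union_rename:
 *
 *	Rename in the upper layer only.  A source file based in the
 *	lower layer is copied up first; a source directory based in the
 *	lower layer cannot be renamed and returns EXDEV.  A whiteout is
 *	requested whenever a lower layer source exists.  All four vnodes
 *	are consumed per the usual VOP_RENAME convention.
 */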
1369 static int
1370 union_rename(ap)
1371 	struct vop_rename_args  /* {
1372 		struct vnode *a_fdvp;
1373 		struct vnode *a_fvp;
1374 		struct componentname *a_fcnp;
1375 		struct vnode *a_tdvp;
1376 		struct vnode *a_tvp;
1377 		struct componentname *a_tcnp;
1378 	} */ *ap;
1379 {
1380 	int error;
1381 	struct vnode *fdvp = ap->a_fdvp;
1382 	struct vnode *fvp = ap->a_fvp;
1383 	struct vnode *tdvp = ap->a_tdvp;
1384 	struct vnode *tvp = ap->a_tvp;
1385 
1386 	/*
1387 	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
1388 	 * replace the fdvp, release the original one and ref the new one.
1389 	 */
1390 
1391 	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
1392 		struct union_node *un = VTOUNION(fdvp);
1393 		if (un->un_uppervp == NULLVP) {
1394 			/*
1395 			 * this should never happen in normal
1396 			 * operation but might if there was
1397 			 * a problem creating the top-level shadow
1398 			 * directory.
1399 			 */
1400 			error = EXDEV;
1401 			goto bad;
1402 		}
1403 		fdvp = un->un_uppervp;
1404 		VREF(fdvp);
1405 		vrele(ap->a_fdvp);
1406 	}
1407 
1408 	/*
1409 	 * Figure out what fvp to pass to our upper or lower vnode.  If we
1410 	 * replace the fvp, release the original one and ref the new one.
1411 	 */
1412 
1413 	if (fvp->v_op == union_vnodeop_p) {	/* always true */
1414 		struct union_node *un = VTOUNION(fvp);
1415 #if 0
1416 		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
1417 #endif
1418 
1419 		if (un->un_uppervp == NULLVP) {
1420 			switch(fvp->v_type) {
1421 			case VREG:
1422 				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_proc);
1423 				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_proc);
1424 				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_proc);
1425 				if (error)
1426 					goto bad;
1427 				break;
1428 			case VDIR:
1429 				/*
1430 				 * XXX not yet.
1431 				 *
1432 				 * There is only one way to rename a directory
1433 				 * based in the lowervp, and that is to copy
1434 				 * the entire directory hierarchy.  Otherwise
1435 				 * it would not last across a reboot.
1436 				 */
1437 #if 0
1438 				vrele(fvp);
1439 				fvp = NULL;
1440 				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_proc);
1441 				error = union_mkshadow(um, fdvp,
1442 					    ap->a_fcnp, &un->un_uppervp);
1443 				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_proc);
1444 				if (un->un_uppervp)
1445 					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_proc);
1446 				if (error)
1447 					goto bad;
1448 				break;
1449 #endif
1450 			default:
1451 				error = EXDEV;
1452 				goto bad;
1453 			}
1454 		}
1455 
1456 		if (un->un_lowervp != NULLVP)
1457 			ap->a_fcnp->cn_flags |= DOWHITEOUT;
1458 		fvp = un->un_uppervp;
1459 		VREF(fvp);
1460 		vrele(ap->a_fvp);
1461 	}
1462 
1463 	/*
1464 	 * Figure out what tdvp (destination directory) to pass to the
1465 	 * lower level.  If we replace it with uppervp, we need to vput the
1466 	 * old one.  The exclusive lock is transferred to what we will pass
1467 	 * down in the VOP_RENAME and we replace uppervp with a simple
1468 	 * reference.
1469 	 */
1470 
1471 	if (tdvp->v_op == union_vnodeop_p) {
1472 		struct union_node *un = VTOUNION(tdvp);
1473 
1474 		if (un->un_uppervp == NULLVP) {
1475 			/*
1476 			 * this should never happen in normal
1477 			 * operation but might if there was
1478 			 * a problem creating the top-level shadow
1479 			 * directory.
1480 			 */
1481 			error = EXDEV;
1482 			goto bad;
1483 		}
1484 
1485 		/*
1486 		 * new tdvp is a lock and reference on uppervp, put away
1487 		 * the old tdvp.
1488 		 */
1489 		tdvp = union_lock_upper(un, ap->a_tcnp->cn_proc);
1490 		vput(ap->a_tdvp);
1491 	}
1492 
1493 	/*
1494 	 * Figure out what tvp (destination file) to pass to the
1495 	 * lower level.
1496 	 *
1497 	 * If the uppervp file does not exist put away the (wrong)
1498 	 * file and change tvp to NULL.
1499 	 */
1500 
1501 	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
1502 		struct union_node *un = VTOUNION(tvp);
1503 
1504 		tvp = union_lock_upper(un, ap->a_tcnp->cn_proc);
1505 		vput(ap->a_tvp);
1506 		/* note: tvp may be NULL */
1507 	}
1508 
1509 	/*
1510 	 * VOP_RENAME releases/vputs prior to returning, so we have no
1511 	 * cleanup to do.
1512 	 */
1513 
1514 	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));
1515 
1516 	/*
1517 	 * Error.  We still have to release / vput the various elements.
1518 	 */
1519 
1520 bad:
1521 	vrele(fdvp);
1522 	if (fvp)
1523 		vrele(fvp);
1524 	vput(tdvp);
1525 	if (tvp != NULLVP) {
1526 		if (tvp != tdvp)
1527 			vput(tvp);
1528 		else
1529 			vrele(tvp);
1530 	}
1531 	return (error);
1532 }
1533 
1534 static int
1535 union_mkdir(ap)
1536 	struct vop_mkdir_args /* {
1537 		struct vnode *a_dvp;
1538 		struct vnode **a_vpp;
1539 		struct componentname *a_cnp;
1540 		struct vattr *a_vap;
1541 	} */ *ap;
1542 {
1543 	struct union_node *dun = VTOUNION(ap->a_dvp);
1544 	struct componentname *cnp = ap->a_cnp;
1545 	struct proc *p = cnp->cn_proc;
1546 	struct vnode *upperdvp;
1547 	int error = EROFS;
1548 
1549 	if ((upperdvp = union_lock_upper(dun, p)) != NULLVP) {
1550 		struct vnode *vp;
1551 
1552 		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
1553 		union_unlock_upper(upperdvp, p);
1554 
1555 		if (error == 0) {
1556 			VOP_UNLOCK(vp, 0, p);
1557 			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_usecount));
1558 			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
1559 				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
1560 			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
1561 		}
1562 	}
1563 	return (error);
1564 }
1565 
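/*
 *	union_rmdir:
 *
 *	Remove the directory from the upper layer when it exists there,
 *	otherwise just create a whiteout over the lower layer directory.
 *	As with union_remove, dvp and vp are locked on entry and remain
 *	locked on return.
 */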
1566 static int
1567 union_rmdir(ap)
1568 	struct vop_rmdir_args /* {
1569 		struct vnode *a_dvp;
1570 		struct vnode *a_vp;
1571 		struct componentname *a_cnp;
1572 	} */ *ap;
1573 {
1574 	struct union_node *dun = VTOUNION(ap->a_dvp);
1575 	struct union_node *un = VTOUNION(ap->a_vp);
1576 	struct componentname *cnp = ap->a_cnp;
1577 	struct proc *p = cnp->cn_proc;
1578 	struct vnode *upperdvp;
1579 	struct vnode *uppervp;
1580 	int error;
1581 
1582 	if ((upperdvp = union_lock_upper(dun, p)) == NULLVP)
1583 		panic("union rmdir: null upper vnode");
1584 
1585 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1586 		if (union_dowhiteout(un, cnp->cn_cred, p))
1587 			cnp->cn_flags |= DOWHITEOUT;
1588 		error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
1589 		union_unlock_upper(uppervp, p);
1590 	} else {
1591 		error = union_mkwhiteout(
1592 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1593 			    dun->un_uppervp, ap->a_cnp, un->un_path);
1594 	}
1595 	union_unlock_upper(upperdvp, p);
1596 	return (error);
1597 }
1598 
1599 /*
1600  *	union_symlink:
1601  *
1602  *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
1603  *	(unused).
1604  */
1605 
1606 static int
1607 union_symlink(ap)
1608 	struct vop_symlink_args /* {
1609 		struct vnode *a_dvp;
1610 		struct vnode **a_vpp;
1611 		struct componentname *a_cnp;
1612 		struct vattr *a_vap;
1613 		char *a_target;
1614 	} */ *ap;
1615 {
1616 	struct union_node *dun = VTOUNION(ap->a_dvp);
1617 	struct componentname *cnp = ap->a_cnp;
1618 	struct proc *p = cnp->cn_proc;
1619 	struct vnode *dvp;
1620 	int error = EROFS;
1621 
1622 	if ((dvp = union_lock_upper(dun, p)) != NULLVP) {
1623 		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1624 			    ap->a_target);
1625 		union_unlock_upper(dvp, p);
1626 	}
1627 	return (error);
1628 }
1629 
1630 /*
1631  * union_readdir works in concert with getdirentries and
1632  * readdir(3) to provide a list of entries in the unioned
1633  * directories.  getdirentries is responsible for walking
1634  * down the union stack.  readdir(3) is responsible for
1635  * eliminating duplicate names from the returned data stream.
1636  */
1637 static int
1638 union_readdir(ap)
1639 	struct vop_readdir_args /* {
1640 		struct vnode *a_vp;
1641 		struct uio *a_uio;
1642 		struct ucred *a_cred;
1643 		int *a_eofflag;
1644 		int *a_ncookies;
1645 		u_long **a_cookies;
1646 	} */ *ap;
1647 {
1648 	struct union_node *un = VTOUNION(ap->a_vp);
1649 	struct proc *p = ap->a_uio->uio_procp;
1650 	struct vnode *uvp;
1651 	int error = 0;
1652 
1653 	if ((uvp = union_lock_upper(un, p)) != NULLVP) {
1654 		ap->a_vp = uvp;
1655 		error = VCALL(uvp, VOFFSET(vop_readdir), ap);
1656 		union_unlock_upper(uvp, p);
1657 	}
1658 	return(error);
1659 }
1660 
1661 static int
1662 union_readlink(ap)
1663 	struct vop_readlink_args /* {
1664 		struct vnode *a_vp;
1665 		struct uio *a_uio;
1666 		struct ucred *a_cred;
1667 	} */ *ap;
1668 {
1669 	int error;
1670 	struct union_node *un = VTOUNION(ap->a_vp);
1671 	struct uio *uio = ap->a_uio;
1672 	struct proc *p = uio->uio_procp;
1673 	struct vnode *vp;
1674 
1675 	vp = union_lock_other(un, p);
1676 	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1677 
1678 	ap->a_vp = vp;
1679 	error = VCALL(vp, VOFFSET(vop_readlink), ap);
1680 	union_unlock_other(vp, p);
1681 
1682 	return (error);
1683 }
1684 
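/*
 *	union_getwritemount:
 *
 *	Writes are always directed at the upper layer, so report the
 *	upper layer's write mount.  A missing upper layer is a panic.
 */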
1685 static int
1686 union_getwritemount(ap)
1687 	struct vop_getwritemount_args /* {
1688 		struct vnode *a_vp;
1689 		struct mount **a_mpp;
1690 	} */ *ap;
1691 {
1692 	struct vnode *vp = UPPERVP(ap->a_vp);
1693 
1694 	if (vp == NULL)
1695 		panic("union: missing upper layer in getwritemount");
1696 	return(VOP_GETWRITEMOUNT(vp, ap->a_mpp));
1697 }
1698 
1699 /*
1700  *	union_inactive:
1701  *
1702  *	Called with the vnode locked.  We are expected to unlock the vnode.
1703  */
1704 
1705 static int
1706 union_inactive(ap)
1707 	struct vop_inactive_args /* {
1708 		struct vnode *a_vp;
1709 		struct proc *a_p;
1710 	} */ *ap;
1711 {
1712 	struct vnode *vp = ap->a_vp;
1713 	struct proc *p = ap->a_p;
1714 	struct union_node *un = VTOUNION(vp);
1715 	struct vnode **vpp;
1716 
1717 	/*
1718 	 * Do nothing (and _don't_ bypass).
1719 	 * Wait to vrele lowervp until reclaim,
1720 	 * so that until then our union_node is in the
1721 	 * cache and reusable.
1722 	 *
1723 	 * NEEDSWORK: Someday, consider inactive'ing
1724 	 * the lowervp and then trying to reactivate it
1725 	 * with capabilities (v_id)
1726 	 * like they do in the name lookup cache code.
1727 	 * That's too much work for now.
1728 	 */
1729 
1730 	if (un->un_dircache != NULL) {
1731 		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1732 			vrele(*vpp);
1733 		free(un->un_dircache, M_TEMP);
1734 		un->un_dircache = NULL;
1735 	}
1736 
1737 #if 0
1738 	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
1739 		un->un_flags &= ~UN_ULOCK;
1740 		VOP_UNLOCK(un->un_uppervp, 0, p);
1741 	}
1742 #endif
1743 
1744 	VOP_UNLOCK(vp, 0, p);
1745 
1746 	if ((un->un_flags & UN_CACHED) == 0)
1747 		vgone(vp);
1748 
1749 	return (0);
1750 }
1751 
1752 static int
1753 union_reclaim(ap)
1754 	struct vop_reclaim_args /* {
1755 		struct vnode *a_vp;
1756 	} */ *ap;
1757 {
1758 	union_freevp(ap->a_vp);
1759 
1760 	return (0);
1761 }
1762 
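/*
 *	union_lock:
 *
 *	Lock the union node itself via the standard lock.  The code that
 *	also propagated an exclusive lock to the upper vnode is disabled
 *	(see the #if 0 block below).
 */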
1763 static int
1764 union_lock(ap)
1765 	struct vop_lock_args *ap;
1766 {
1767 #if 0
1768 	struct vnode *vp = ap->a_vp;
1769 	struct proc *p = ap->a_p;
1770 	int flags = ap->a_flags;
1771 	struct union_node *un;
1772 #endif
1773 	int error;
1774 
1775 	error = vop_stdlock(ap);
1776 #if 0
1777 	un = VTOUNION(vp);
1778 
1779 	if (error == 0) {
1780 		/*
1781 		 * Lock the upper if it exists and this is an exclusive lock
1782 		 * request.
1783 		 */
1784 		if (un->un_uppervp != NULLVP &&
1785 		    (flags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
1786 			if ((un->un_flags & UN_ULOCK) == 0 && vp->v_usecount) {
1787 				error = vn_lock(un->un_uppervp, flags, p);
1788 				if (error) {
1789 					struct vop_unlock_args uap = { 0 };
1790 					uap.a_vp = ap->a_vp;
1791 					uap.a_flags = ap->a_flags;
1792 					uap.a_p = ap->a_p;
1793 					vop_stdunlock(&uap);
1794 					return (error);
1795 				}
1796 				un->un_flags |= UN_ULOCK;
1797 			}
1798 		}
1799 	}
1800 #endif
1801 	return (error);
1802 }
1803 
1804 /*
1805  *	union_unlock:
1806  *
1807  *	Unlock our union node.  This also unlocks uppervp.
1808  */
1809 static int
1810 union_unlock(ap)
1811 	struct vop_unlock_args /* {
1812 		struct vnode *a_vp;
1813 		int a_flags;
1814 		struct proc *a_p;
1815 	} */ *ap;
1816 {
1817 	struct union_node *un = VTOUNION(ap->a_vp);
1818 	int error;
1819 
1820 	KASSERT((un->un_uppervp == NULL || un->un_uppervp->v_usecount > 0), ("uppervp usecount is 0"));
1821 
1822 	error = vop_stdunlock(ap);
1823 #if 0
1824 
1825 	/*
1826 	 * If no exclusive locks remain and we are holding an uppervp lock,
1827 	 * remove the uppervp lock.
1828 	 */
1829 
1830 	if ((un->un_flags & UN_ULOCK) &&
1831 	    lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
1832 		un->un_flags &= ~UN_ULOCK;
1833 		VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, p);
1834 	}
1835 #endif
1836 	return(error);
1837 }
1838 
1839 /*
1840  *	union_bmap:
1841  *
1842  *	There isn't much we can do.  We cannot push through to the real vnode
1843  *	to get to the underlying device because this will bypass data
1844  *	cached by the real vnode.
1845  *
1846  *	For some reason we cannot return the 'real' vnode either, it seems
1847  *	to blow up memory maps.
1848  */
1849 
1850 static int
1851 union_bmap(ap)
1852 	struct vop_bmap_args /* {
1853 		struct vnode *a_vp;
1854 		daddr_t  a_bn;
1855 		struct vnode **a_vpp;
1856 		daddr_t *a_bnp;
1857 		int *a_runp;
1858 		int *a_runb;
1859 	} */ *ap;
1860 {
1861 	return(EOPNOTSUPP);
1862 }
1863 
1864 static int
1865 union_print(ap)
1866 	struct vop_print_args /* {
1867 		struct vnode *a_vp;
1868 	} */ *ap;
1869 {
1870 	struct vnode *vp = ap->a_vp;
1871 
1872 	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1873 			vp, UPPERVP(vp), LOWERVP(vp));
1874 	if (UPPERVP(vp) != NULLVP)
1875 		vprint("union: upper", UPPERVP(vp));
1876 	if (LOWERVP(vp) != NULLVP)
1877 		vprint("union: lower", LOWERVP(vp));
1878 
1879 	return (0);
1880 }
1881 
1882 static int
1883 union_pathconf(ap)
1884 	struct vop_pathconf_args /* {
1885 		struct vnode *a_vp;
1886 		int a_name;
1887 		int *a_retval;
1888 	} */ *ap;
1889 {
1890 	int error;
1891 	struct proc *p = curproc;		/* XXX */
1892 	struct union_node *un = VTOUNION(ap->a_vp);
1893 	struct vnode *vp;
1894 
1895 	vp = union_lock_other(un, p);
1896 	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1897 
1898 	ap->a_vp = vp;
1899 	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1900 	union_unlock_other(vp, p);
1901 
1902 	return (error);
1903 }
1904 
1905 static int
1906 union_advlock(ap)
1907 	struct vop_advlock_args /* {
1908 		struct vnode *a_vp;
1909 		caddr_t  a_id;
1910 		int  a_op;
1911 		struct flock *a_fl;
1912 		int  a_flags;
1913 	} */ *ap;
1914 {
1915 	register struct vnode *ovp = OTHERVP(ap->a_vp);
1916 
1917 	ap->a_vp = ovp;
1918 	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1919 }
1920 
1921 
1922 /*
1923  * XXX - vop_strategy must be hand coded because it has no
1924  *	 vnode in its arguments.
1925  * YYY - and it is not coherent with anything.
1926  *
1927  * This goes away with a merged VM/buffer cache.
1928  */
1929 static int
1930 union_strategy(ap)
1931 	struct vop_strategy_args /* {
1932 		struct vnode *a_vp;
1933 		struct buf *a_bp;
1934 	} */ *ap;
1935 {
1936 	struct buf *bp = ap->a_bp;
1937 	struct vnode *othervp = OTHERVP(bp->b_vp);
1938 
1939 #ifdef DIAGNOSTIC
1940 	if (othervp == NULLVP)
1941 		panic("union_strategy: nil vp");
1942 	if ((bp->b_iocmd == BIO_WRITE) &&
1943 	    (othervp == LOWERVP(bp->b_vp)))
1944 		panic("union_strategy: writing to lowervp");
1945 #endif
1946 	return (VOP_STRATEGY(othervp, bp));
1947 }
1948 
1949 /*
1950  * Global vfs data structures
1951  */
1952 vop_t **union_vnodeop_p;
1953 static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
1954 	{ &vop_default_desc,		(vop_t *) vop_defaultop },
1955 	{ &vop_access_desc,		(vop_t *) union_access },
1956 	{ &vop_advlock_desc,		(vop_t *) union_advlock },
1957 	{ &vop_bmap_desc,		(vop_t *) union_bmap },
1958 	{ &vop_close_desc,		(vop_t *) union_close },
1959 	{ &vop_create_desc,		(vop_t *) union_create },
1960 	{ &vop_fsync_desc,		(vop_t *) union_fsync },
1961 	{ &vop_getpages_desc,		(vop_t *) union_getpages },
1962 	{ &vop_putpages_desc,		(vop_t *) union_putpages },
1963 	{ &vop_getattr_desc,		(vop_t *) union_getattr },
1964 	{ &vop_inactive_desc,		(vop_t *) union_inactive },
1965 	{ &vop_ioctl_desc,		(vop_t *) union_ioctl },
1966 	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
1967 	{ &vop_lease_desc,		(vop_t *) union_lease },
1968 	{ &vop_link_desc,		(vop_t *) union_link },
1969 	{ &vop_lock_desc,		(vop_t *) union_lock },
1970 	{ &vop_lookup_desc,		(vop_t *) union_lookup },
1971 	{ &vop_mkdir_desc,		(vop_t *) union_mkdir },
1972 	{ &vop_mknod_desc,		(vop_t *) union_mknod },
1973 	{ &vop_mmap_desc,		(vop_t *) union_mmap },
1974 	{ &vop_open_desc,		(vop_t *) union_open },
1975 	{ &vop_pathconf_desc,		(vop_t *) union_pathconf },
1976 	{ &vop_poll_desc,		(vop_t *) union_poll },
1977 	{ &vop_print_desc,		(vop_t *) union_print },
1978 	{ &vop_read_desc,		(vop_t *) union_read },
1979 	{ &vop_readdir_desc,		(vop_t *) union_readdir },
1980 	{ &vop_readlink_desc,		(vop_t *) union_readlink },
1981 	{ &vop_getwritemount_desc,	(vop_t *) union_getwritemount },
1982 	{ &vop_reclaim_desc,		(vop_t *) union_reclaim },
1983 	{ &vop_remove_desc,		(vop_t *) union_remove },
1984 	{ &vop_rename_desc,		(vop_t *) union_rename },
1985 	{ &vop_revoke_desc,		(vop_t *) union_revoke },
1986 	{ &vop_rmdir_desc,		(vop_t *) union_rmdir },
1987 	{ &vop_setattr_desc,		(vop_t *) union_setattr },
1988 	{ &vop_strategy_desc,		(vop_t *) union_strategy },
1989 	{ &vop_symlink_desc,		(vop_t *) union_symlink },
1990 	{ &vop_unlock_desc,		(vop_t *) union_unlock },
1991 	{ &vop_whiteout_desc,		(vop_t *) union_whiteout },
1992 	{ &vop_write_desc,		(vop_t *) union_write },
1993 	{ NULL, NULL }
1994 };
1995 static struct vnodeopv_desc union_vnodeop_opv_desc =
1996 	{ &union_vnodeop_p, union_vnodeop_entries };
1997 
1998 VNODEOP_SET(union_vnodeop_opv_desc);
1999