xref: /freebsd/sys/fs/unionfs/union_vnops.c (revision daf1cffce2e07931f27c6c6998652e90df6ba87e)
1 /*
2  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3  * Copyright (c) 1992, 1993, 1994, 1995
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
38  * $FreeBSD$
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/proc.h>
44 #include <sys/fcntl.h>
45 #include <sys/stat.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <sys/mount.h>
49 #include <sys/namei.h>
50 #include <sys/malloc.h>
51 #include <sys/buf.h>
52 #include <sys/lock.h>
53 #include <sys/sysctl.h>
54 #include <miscfs/union/union.h>
55 
56 #include <vm/vm.h>
57 #include <vm/vnode_pager.h>
58 
59 #include <vm/vm_page.h>
60 #include <vm/vm_object.h>
61 
/*
 * Debug verbosity knob, presumably consulted by the UDEBUG() macro —
 * exported as sysctl vfs.uniondebug.  It is writable only when the
 * kernel is built with UDEBUG_ENABLED; otherwise it is read-only.
 */
int uniondebug = 0;

#if UDEBUG_ENABLED
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
#else
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
#endif
69 
70 static int	union_access __P((struct vop_access_args *ap));
71 static int	union_advlock __P((struct vop_advlock_args *ap));
72 static int	union_bmap __P((struct vop_bmap_args *ap));
73 static int	union_close __P((struct vop_close_args *ap));
74 static int	union_create __P((struct vop_create_args *ap));
75 static int	union_fsync __P((struct vop_fsync_args *ap));
76 static int	union_getattr __P((struct vop_getattr_args *ap));
77 static int	union_inactive __P((struct vop_inactive_args *ap));
78 static int	union_ioctl __P((struct vop_ioctl_args *ap));
79 static int	union_lease __P((struct vop_lease_args *ap));
80 static int	union_link __P((struct vop_link_args *ap));
81 static int	union_lock __P((struct vop_lock_args *ap));
82 static int	union_lookup __P((struct vop_lookup_args *ap));
83 static int	union_lookup1 __P((struct vnode *udvp, struct vnode **dvp,
84 				   struct vnode **vpp,
85 				   struct componentname *cnp));
86 static int	union_mkdir __P((struct vop_mkdir_args *ap));
87 static int	union_mknod __P((struct vop_mknod_args *ap));
88 static int	union_mmap __P((struct vop_mmap_args *ap));
89 static int	union_open __P((struct vop_open_args *ap));
90 static int	union_pathconf __P((struct vop_pathconf_args *ap));
91 static int	union_print __P((struct vop_print_args *ap));
92 static int	union_read __P((struct vop_read_args *ap));
93 static int	union_readdir __P((struct vop_readdir_args *ap));
94 static int	union_readlink __P((struct vop_readlink_args *ap));
95 static int	union_reclaim __P((struct vop_reclaim_args *ap));
96 static int	union_remove __P((struct vop_remove_args *ap));
97 static int	union_rename __P((struct vop_rename_args *ap));
98 static int	union_revoke __P((struct vop_revoke_args *ap));
99 static int	union_rmdir __P((struct vop_rmdir_args *ap));
100 static int	union_poll __P((struct vop_poll_args *ap));
101 static int	union_setattr __P((struct vop_setattr_args *ap));
102 static int	union_strategy __P((struct vop_strategy_args *ap));
103 static int	union_getpages __P((struct vop_getpages_args *ap));
104 static int	union_putpages __P((struct vop_putpages_args *ap));
105 static int	union_symlink __P((struct vop_symlink_args *ap));
106 static int	union_unlock __P((struct vop_unlock_args *ap));
107 static int	union_whiteout __P((struct vop_whiteout_args *ap));
108 static int	union_write __P((struct vop_read_args *ap));
109 
/*
 * Return the upper-layer vnode of 'un', referenced and exclusively
 * locked, or NULL if this union node has no upper layer.  The caller
 * releases it with union_unlock_upper().  LK_CANRECURSE permits the
 * lock to recurse — presumably because the caller may already hold
 * the upper vnode's lock through the union vnode; TODO confirm.
 */
static __inline
struct vnode *
union_lock_upper(struct union_node *un, struct proc *p)
{
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		VREF(uppervp);
		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
	}
	/* A returned vnode must carry the reference we just took. */
	KASSERT((uppervp == NULL || uppervp->v_usecount > 0), ("uppervp usecount is 0"));
	return(uppervp);
}
123 
/*
 * Release an upper-layer vnode previously obtained through
 * union_lock_upper(): drop the exclusive lock and the reference
 * in a single step.
 */
static __inline
void
union_unlock_upper(struct vnode *uppervp, struct proc *p)
{
	vput(uppervp);		/* unlock + release reference */
}
130 
/*
 * Return the "backing" vnode for 'un', referenced and exclusively
 * locked: the upper vnode when one exists, otherwise the lower vnode.
 * Returns NULL only when both layers are absent.  Release the result
 * with union_unlock_other().
 */
static __inline
struct vnode *
union_lock_other(struct union_node *un, struct proc *p)
{
	struct vnode *vp;

	if (un->un_uppervp != NULL) {
		/* Upper layer takes precedence; lock via the helper. */
		vp = union_lock_upper(un, p);
	} else if ((vp = un->un_lowervp) != NULL) {
		VREF(vp);
		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
	}
	return(vp);
}
145 
/*
 * Counterpart to union_lock_other(): drop both the lock and the
 * reference on the backing vnode.
 */
static __inline
void
union_unlock_other(struct vnode *vp, struct proc *p)
{
	vput(vp);		/* unlock + release reference */
}
152 
/*
 *	union_lookup1:
 *
 *	udvp	must be exclusively locked on call and will remain
 *		exclusively locked on return.  This is the mount point
 *		for our filesystem.
 *
 *	dvp	Our base directory, locked and referenced.
 *		The passed dvp will be dereferenced and unlocked on return
 *		and a new dvp will be returned which is locked and
 *		referenced in the same variable.
 *
 *	vpp	is filled in with the result if no error occurred,
 *		locked and ref'd.
 *
 *		If an error is returned, *vpp is set to NULLVP.  If no
 *		error occurs, *vpp is returned with a reference and an
 *		exclusive lock.
 */
172 
static int
union_lookup1(udvp, pdvp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **pdvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct proc *p = cnp->cn_proc;
	struct vnode *dvp = *pdvp;
	struct vnode *tdvp;
	struct mount *mp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			/* Step to the vnode this mount covers. */
			dvp = dvp->v_mount->mnt_vnodecovered;
			VREF(dvp);
			vput(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		}
	}

	/*
	 * Set return dvp to be the upperdvp 'parent directory.
	 */
	*pdvp = dvp;

	/*
	 * If the VOP_LOOKUP call generates an error, tdvp is invalid and no
	 * changes will have been made to dvp, so we are set to return.
	 */

        error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error) {
		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
		*vpp = NULL;
		return (error);
	}

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component or if dvp == tdvp (tdvp must be locked).
	 *
	 * We want our dvp to remain locked and ref'd.  We also want tdvp
	 * to remain locked and ref'd.
	 */
	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));

	if (dvp != tdvp && (cnp->cn_flags & ISLASTCN) == 0)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */

	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		/* Could not busy the mount point; re-evaluate and retry. */
		if (vfs_busy(mp, 0, 0, p))
			continue;

		/* Remember if we are about to vput() the caller's *pdvp. */
		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		error = VFS_ROOT(mp, &dvp);

		vfs_unbusy(mp, p);

		/* Restore the lock the caller expects on *pdvp. */
		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, p);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}
277 
278 static int
279 union_lookup(ap)
280 	struct vop_lookup_args /* {
281 		struct vnodeop_desc *a_desc;
282 		struct vnode *a_dvp;
283 		struct vnode **a_vpp;
284 		struct componentname *a_cnp;
285 	} */ *ap;
286 {
287 	int error;
288 	int uerror, lerror;
289 	struct vnode *uppervp, *lowervp;
290 	struct vnode *upperdvp, *lowerdvp;
291 	struct vnode *dvp = ap->a_dvp;		/* starting dir */
292 	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
293 	struct componentname *cnp = ap->a_cnp;
294 	struct proc *p = cnp->cn_proc;
295 	int lockparent = cnp->cn_flags & LOCKPARENT;
296 	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
297 	struct ucred *saved_cred = NULL;
298 	int iswhiteout;
299 	struct vattr va;
300 
301 	*ap->a_vpp = NULLVP;
302 
303 	/*
304 	 * Disallow write attemps to the filesystem mounted read-only.
305 	 */
306 	if ((cnp->cn_flags & ISLASTCN) &&
307 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
308 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
309 		return (EROFS);
310 	}
311 
312 	/*
313 	 * For any lookup's we do, always return with the parent locked
314 	 */
315 	cnp->cn_flags |= LOCKPARENT;
316 
317 	lowerdvp = dun->un_lowervp;
318 	uppervp = NULLVP;
319 	lowervp = NULLVP;
320 	iswhiteout = 0;
321 
322 	uerror = ENOENT;
323 	lerror = ENOENT;
324 
325 	/*
326 	 * Get a private lock on uppervp and a reference, effectively
327 	 * taking it out of the union_node's control.
328 	 *
329 	 * We must lock upperdvp while holding our lock on dvp
330 	 * to avoid a deadlock.
331 	 */
332 	upperdvp = union_lock_upper(dun, p);
333 
334 	/*
335 	 * do the lookup in the upper level.
336 	 * if that level comsumes additional pathnames,
337 	 * then assume that something special is going
338 	 * on and just return that vnode.
339 	 */
340 	if (upperdvp != NULLVP) {
341 		/*
342 		 * We do not have to worry about the DOTDOT case, we've
343 		 * already unlocked dvp.
344 		 */
345 		UDEBUG(("A %p\n", upperdvp));
346 
347 		/*
348 		 * Do the lookup.   We must supply a locked and referenced
349 		 * upperdvp to the function and will get a new locked and
350 		 * referenced upperdvp back with the old having been
351 		 * dereferenced.
352 		 *
353 		 * If an error is returned, uppervp will be NULLVP.  If no
354 		 * error occurs, uppervp will be the locked and referenced
355 		 * return vnode or possibly NULL, depending on what is being
356 		 * requested.  It is possible that the returned uppervp
357 		 * will be the same as upperdvp.
358 		 */
359 		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
360 		UDEBUG((
361 		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
362 		    uerror,
363 		    upperdvp,
364 		    upperdvp->v_usecount,
365 		    VOP_ISLOCKED(upperdvp, NULL),
366 		    uppervp,
367 		    (uppervp ? uppervp->v_usecount : -99),
368 		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
369 		));
370 
371 		/*
372 		 * Disallow write attemps to the filesystem mounted read-only.
373 		 */
374 		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
375 		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
376 		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
377 			error = EROFS;
378 			goto out;
379 		}
380 
381 		/*
382 		 * Special case.  If cn_consume != 0 skip out.  The result
383 		 * of the lookup is transfered to our return variable.  If
384 		 * an error occured we have to throw away the results.
385 		 */
386 
387 		if (cnp->cn_consume != 0) {
388 			if ((error = uerror) == 0) {
389 				*ap->a_vpp = uppervp;
390 				uppervp = NULL;
391 			}
392 			goto out;
393 		}
394 
395 		/*
396 		 * Calculate whiteout, fall through
397 		 */
398 
399 		if (uerror == ENOENT || uerror == EJUSTRETURN) {
400 			if (cnp->cn_flags & ISWHITEOUT) {
401 				iswhiteout = 1;
402 			} else if (lowerdvp != NULLVP) {
403 				int terror;
404 
405 				terror = VOP_GETATTR(upperdvp, &va,
406 					cnp->cn_cred, cnp->cn_proc);
407 				if (terror == 0 && (va.va_flags & OPAQUE))
408 					iswhiteout = 1;
409 			}
410 		}
411 	}
412 
413 	/*
414 	 * in a similar way to the upper layer, do the lookup
415 	 * in the lower layer.   this time, if there is some
416 	 * component magic going on, then vput whatever we got
417 	 * back from the upper layer and return the lower vnode
418 	 * instead.
419 	 */
420 
421 	if (lowerdvp != NULLVP && !iswhiteout) {
422 		int nameiop;
423 
424 		UDEBUG(("B %p\n", lowerdvp));
425 
426 		/*
427 		 * Force only LOOKUPs on the lower node, since
428 		 * we won't be making changes to it anyway.
429 		 */
430 		nameiop = cnp->cn_nameiop;
431 		cnp->cn_nameiop = LOOKUP;
432 		if (um->um_op == UNMNT_BELOW) {
433 			saved_cred = cnp->cn_cred;
434 			cnp->cn_cred = um->um_cred;
435 		}
436 
437 		/*
438 		 * We shouldn't have to worry about locking interactions
439 		 * between the lower layer and our union layer (w.r.t.
440 		 * `..' processing) because we don't futz with lowervp
441 		 * locks in the union-node instantiation code path.
442 		 *
443 		 * union_lookup1() requires lowervp to be locked on entry,
444 		 * and it will be unlocked on return.  The ref count will
445 		 * not change.  On return lowervp doesn't represent anything
446 		 * to us so we NULL it out.
447 		 */
448 		VREF(lowerdvp);
449 		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p);
450 		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
451 		if (lowerdvp == lowervp)
452 			vrele(lowerdvp);
453 		else
454 			vput(lowerdvp);
455 		lowerdvp = NULL;	/* lowerdvp invalid after vput */
456 
457 		if (um->um_op == UNMNT_BELOW)
458 			cnp->cn_cred = saved_cred;
459 		cnp->cn_nameiop = nameiop;
460 
461 		if (cnp->cn_consume != 0 || lerror == EACCES) {
462 			if ((error = lerror) == 0) {
463 				*ap->a_vpp = lowervp;
464 				lowervp = NULL;
465 			}
466 			goto out;
467 		}
468 	} else {
469 		UDEBUG(("C %p\n", lowerdvp));
470 		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
471 			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
472 				VREF(lowervp);
473 				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p);
474 				lerror = 0;
475 			}
476 		}
477 	}
478 
479 	/*
480 	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
481 	 *
482 	 * 1. If both layers returned an error, select the upper layer.
483 	 *
484 	 * 2. If the upper layer faile and the bottom layer succeeded,
485 	 *    two subcases occur:
486 	 *
487 	 *	a.  The bottom vnode is not a directory, in which case
488 	 *	    just return a new union vnode referencing an
489 	 *	    empty top layer and the existing bottom layer.
490 	 *
491 	 *	b.  The button vnode is a directory, in which case
492 	 *	    create a new directory in the top layer and
493 	 *	    and fall through to case 3.
494 	 *
495 	 * 3. If the top layer succeeded then return a new union
496 	 *    vnode referencing whatever the new top layer and
497 	 *    whatever the bottom layer returned.
498 	 */
499 
500 	/* case 1. */
501 	if ((uerror != 0) && (lerror != 0)) {
502 		error = uerror;
503 		goto out;
504 	}
505 
506 	/* case 2. */
507 	if (uerror != 0 /* && (lerror == 0) */ ) {
508 		if (lowervp->v_type == VDIR) { /* case 2b. */
509 			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
510 			/*
511 			 * oops, uppervp has a problem, we may have to shadow.
512 			 */
513 			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
514 			if (uerror) {
515 				error = uerror;
516 				goto out;
517 			}
518 		}
519 	}
520 
521 	/*
522 	 * Must call union_allocvp with both the upper and lower vnodes
523 	 * referenced and the upper vnode locked.   ap->a_vpp is returned
524 	 * referenced and locked.  lowervp, uppervp, and upperdvp are
525 	 * absorbed by union_allocvp() whether it succeeds or fails.
526 	 *
527 	 * upperdvp is the parent directory of uppervp which may be
528 	 * different, depending on the path, from dvp->un_uppervp.  That's
529 	 * why it is a separate argument.  Note that it must be unlocked.
530 	 *
531 	 * dvp must be locked on entry to the call and will be locked on
532 	 * return.
533 	 */
534 
535 	if (uppervp && uppervp != upperdvp)
536 		VOP_UNLOCK(uppervp, 0, p);
537 	if (lowervp)
538 		VOP_UNLOCK(lowervp, 0, p);
539 	if (upperdvp)
540 		VOP_UNLOCK(upperdvp, 0, p);
541 
542 	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
543 			      uppervp, lowervp, 1);
544 
545 	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? ((*ap->a_vpp)->v_usecount) : -99));
546 
547 	uppervp = NULL;
548 	upperdvp = NULL;
549 	lowervp = NULL;
550 
551 	/*
552 	 *	Termination Code
553 	 *
554 	 *	- put away any extra junk laying around.  Note that lowervp
555 	 *	  (if not NULL) will never be the same as *ap->a_vp and
556 	 *	  neither will uppervp, because when we set that state we
557 	 *	  NULL-out lowervp or uppervp.  On the otherhand, upperdvp
558 	 *	  may match uppervp or *ap->a_vpp.
559 	 *
560 	 *	- relock/unlock dvp if appropriate.
561 	 */
562 
563 out:
564 	if (upperdvp) {
565 		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
566 			vrele(upperdvp);
567 		else
568 			vput(upperdvp);
569 	}
570 
571 	if (uppervp)
572 		vput(uppervp);
573 
574 	if (lowervp)
575 		vput(lowervp);
576 
577 	/*
578 	 * Restore LOCKPARENT state
579 	 */
580 
581 	if (!lockparent)
582 		cnp->cn_flags &= ~LOCKPARENT;
583 
584 	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
585 		((*ap->a_vpp) ? (*ap->a_vpp)->v_usecount : -99),
586 		lowervp, uppervp));
587 
588 	/*
589 	 * dvp lock state, determine whether to relock dvp.  dvp is expected
590 	 * to be locked on return if:
591 	 *
592 	 *	- there was an error (except not EJUSTRETURN), or
593 	 *	- we hit the last component and lockparent is true
594 	 *
595 	 * dvp_is_locked is the current state of the dvp lock, not counting
596 	 * the possibility that *ap->a_vpp == dvp (in which case it is locked
597 	 * anyway).  Note that *ap->a_vpp == dvp only if no error occured.
598 	 */
599 
600 	if (*ap->a_vpp != dvp) {
601 		if ((error == 0 || error == EJUSTRETURN) &&
602 		    (!lockparent || (cnp->cn_flags & ISLASTCN) == 0)) {
603 			VOP_UNLOCK(dvp, 0, p);
604 		}
605 	}
606 
607 	/*
608 	 * Diagnostics
609 	 */
610 
611 #ifdef DIAGNOSTIC
612 	if (cnp->cn_namelen == 1 &&
613 	    cnp->cn_nameptr[0] == '.' &&
614 	    *ap->a_vpp != dvp) {
615 		panic("union_lookup returning . (%p) not same as startdir (%p)", ap->a_vpp, dvp);
616 	}
617 #endif
618 
619 	return (error);
620 }
621 
622 /*
623  * 	union_create:
624  *
625  * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
626  * locked if no error occurs, otherwise it is garbage.
627  */
628 
static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct vnode *dvp;
	int error = EROFS;	/* no upper layer -> cannot create */

	if ((dvp = union_lock_upper(dun, p)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		/* Creation always happens in the upper layer only. */
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			VOP_UNLOCK(vp, 0, p);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_usecount));
			/* Wrap the new upper vnode in a union vnode. */
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
		}
		union_unlock_upper(dvp, p);
	}
	return (error);
}
661 
/*
 *	union_whiteout:
 *
 *	Record a whiteout entry in the upper layer.  Fails with
 *	EOPNOTSUPP when there is no upper layer to record it in.
 */
static int
union_whiteout(ap)
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *uppervp;
	int error = EOPNOTSUPP;

	if ((uppervp = union_lock_upper(un, cnp->cn_proc)) != NULLVP) {
		/* uppervp == un->un_uppervp here; the lock protects it. */
		error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
		union_unlock_upper(uppervp, cnp->cn_proc);
	}
	return(error);
}
681 
/*
 * 	union_mknod:
 *
 *	a_dvp is locked on entry and should remain locked on return.
 *	a_vpp is garbage whether an error occurs or not.
 */
688 
static int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp;
	int error = EROFS;	/* no upper layer -> cannot create nodes */

	/* Device/special nodes are always created in the upper layer. */
	if ((dvp = union_lock_upper(dun, cnp->cn_proc)) != NULL) {
		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
		union_unlock_upper(dvp, cnp->cn_proc);
	}
	return (error);
}
709 
710 /*
711  *	union_open:
712  *
713  *	run open VOP.  When opening the underlying vnode we have to mimic
714  *	vn_open.  What we *really* need to do to avoid screwups if the
715  *	open semantics change is to call vn_open().  For example, ufs blows
716  *	up if you open a file but do not vmio it prior to writing.
717  */
718 
static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	int error = 0;
	int tvpisupper = 1;	/* which cleanup path to take at the end */

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, p)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* O_TRUNC means contents need not be copied. */
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, p);
			/* After copyup, re-acquire the (new) upper vnode. */
			tvp = union_lock_upper(un, p);
		} else {
			/* Track lower-layer opens for union_close(). */
			un->un_openl++;
			VREF(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it
	 */

	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, p);

	/*
	 * Absolutely necessary or UFS will blowup
	 */
        if (error == 0 && vn_canvmio(tvp) == TRUE) {
                error = vfs_object_create(tvp, p, cred);
        }

	/*
	 * Release any locks held
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, p);
	} else {
		vput(tvp);
	}
	return (error);
}
789 
790 /*
791  *	union_close:
792  *
793  *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
794  *	the case we do not change it.
795  */
796 
static int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	/*
	 * Prefer the upper vnode.  If there is none, this close must
	 * balance a lower-layer open counted in un_openl by union_open(),
	 * so decrement it and close the lower vnode instead.
	 */
	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}
	ap->a_vp = vp;
	/* Forward the close to the chosen underlying vnode. */
	return (VCALL(vp, VOFFSET(vop_close), ap));
}
820 
821 /*
822  * Check access permission on the union vnode.
823  * The access check being enforced is to check
824  * against both the underlying vnode, and any
825  * copied vnode.  This ensures that no additional
826  * file permissions are given away simply because
827  * the user caused an implicit file copy.
828  */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	int error = EACCES;	/* default when neither layer is consulted */
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	/* If an upper layer exists, it alone decides access. */
	if ((vp = union_lock_upper(un, p)) != NULLVP) {
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		union_unlock_upper(vp, p);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			/*
			 * For a BELOW mount, also check using the mount's
			 * own credentials.
			 */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp, 0, p);
	}
	return(error);
}
892 
893 /*
894  * We handle getattr only to change the fsid and
895  * track object sizes
896  *
897  * It's not clear whether VOP_GETATTR is to be
898  * called with the vnode locked or not.  stat() calls
899  * it with (vp) locked, and fstat calls it with
900  * (vp) unlocked.
901  *
902  * Because of this we cannot use our normal locking functions
903  * if we do not intend to lock the main a_vp node.  At the moment
904  * we are running without any specific locking at all, but beware
905  * to any programmer that care must be taken if locking is added
906  * to this function.
907  */
908 
static int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	if ((vp = un->un_uppervp) != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	/*
	 * Decide whether the lower layer also needs to be queried:
	 * always if there was no upper layer; for directories when a
	 * lower layer exists (into the scratch vattr, to merge nlink).
	 */
	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	/* Merge directory link counts from both layers (see above). */
	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
	return (0);
}
964 
static int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
		return (EROFS);
	}

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		/* Only copy the contents if not truncating to zero. */
		error = union_copyup(un, (ap->a_vap->va_size != 0),
			    ap->a_cred, ap->a_p);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	error = EROFS;
	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
					ap->a_cred, ap->a_p);
		/* Keep the union node's cached size in sync. */
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, p);
	}
	return (error);
}
1017 
1018 /*
1019  *	union_getpages:
1020  */
1021 
1022 static int
1023 union_getpages(struct vop_getpages_args *ap)
1024 {
1025 	int r;
1026 
1027 	r = vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
1028 		ap->a_count, ap->a_reqpage);
1029 	return(r);
1030 }
1031 
1032 /*
1033  *	union_putpages:
1034  */
1035 
1036 static int
1037 union_putpages(struct vop_putpages_args *ap)
1038 {
1039 	int r;
1040 
1041 	r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
1042 		ap->a_sync, ap->a_rtvals);
1043 	return(r);
1044 }
1045 
1046 static int
1047 union_read(ap)
1048 	struct vop_read_args /* {
1049 		struct vnode *a_vp;
1050 		struct uio *a_uio;
1051 		int  a_ioflag;
1052 		struct ucred *a_cred;
1053 	} */ *ap;
1054 {
1055 	struct union_node *un = VTOUNION(ap->a_vp);
1056 	struct proc *p = ap->a_uio->uio_procp;
1057 	struct vnode *uvp;
1058 	int error;
1059 
1060 	uvp = union_lock_other(un, p);
1061 	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
1062 
1063 	if (ap->a_vp->v_flag & VOBJBUF)
1064 		union_vm_coherency(ap->a_vp, ap->a_uio, 0);
1065 
1066 	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1067 	union_unlock_other(uvp, p);
1068 
1069 	/*
1070 	 * XXX
1071 	 * perhaps the size of the underlying object has changed under
1072 	 * our feet.  take advantage of the offset information present
1073 	 * in the uio structure.
1074 	 */
1075 	if (error == 0) {
1076 		struct union_node *un = VTOUNION(ap->a_vp);
1077 		off_t cur = ap->a_uio->uio_offset;
1078 
1079 		if (uvp == un->un_uppervp) {
1080 			if (cur > un->un_uppersz)
1081 				union_newsize(ap->a_vp, cur, VNOVAL);
1082 		} else {
1083 			if (cur > un->un_lowersz)
1084 				union_newsize(ap->a_vp, VNOVAL, cur);
1085 		}
1086 	}
1087 	return (error);
1088 }
1089 
1090 static int
1091 union_write(ap)
1092 	struct vop_read_args /* {
1093 		struct vnode *a_vp;
1094 		struct uio *a_uio;
1095 		int  a_ioflag;
1096 		struct ucred *a_cred;
1097 	} */ *ap;
1098 {
1099 	struct union_node *un = VTOUNION(ap->a_vp);
1100 	struct proc *p = ap->a_uio->uio_procp;
1101 	struct vnode *uppervp;
1102 	int error;
1103 
1104 	if ((uppervp = union_lock_upper(un, p)) == NULLVP)
1105 		panic("union: missing upper layer in write");
1106 
1107 	/*
1108 	 * Since our VM pages are associated with our vnode rather then
1109 	 * the real vnode, and since we do not run our reads and writes
1110 	 * through our own VM cache, we have a VM/VFS coherency problem.
1111 	 * We solve them by invalidating or flushing the associated VM
1112 	 * pages prior to allowing a normal read or write to occur.
1113 	 *
1114 	 * VM-backed writes (UIO_NOCOPY) have to be converted to normal
1115 	 * writes because we are not cache-coherent.  Normal writes need
1116 	 * to be made coherent with our VM-backing store, which we do by
1117 	 * first flushing any dirty VM pages associated with the write
1118 	 * range, and then destroying any clean VM pages associated with
1119 	 * the write range.
1120 	 */
1121 
1122 	if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
1123 		ap->a_uio->uio_segflg = UIO_SYSSPACE;
1124 	} else if (ap->a_vp->v_flag & VOBJBUF) {
1125 		union_vm_coherency(ap->a_vp, ap->a_uio, 1);
1126 	}
1127 
1128 	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1129 
1130 	/*
1131 	 * the size of the underlying object may be changed by the
1132 	 * write.
1133 	 */
1134 	if (error == 0) {
1135 		off_t cur = ap->a_uio->uio_offset;
1136 
1137 		if (cur > un->un_uppersz)
1138 			union_newsize(ap->a_vp, cur, VNOVAL);
1139 	}
1140 	union_unlock_upper(uppervp, p);
1141 	return (error);
1142 }
1143 
1144 static int
1145 union_lease(ap)
1146 	struct vop_lease_args /* {
1147 		struct vnode *a_vp;
1148 		struct proc *a_p;
1149 		struct ucred *a_cred;
1150 		int a_flag;
1151 	} */ *ap;
1152 {
1153 	struct vnode *ovp = OTHERVP(ap->a_vp);
1154 
1155 	ap->a_vp = ovp;
1156 	return (VCALL(ovp, VOFFSET(vop_lease), ap));
1157 }
1158 
1159 static int
1160 union_ioctl(ap)
1161 	struct vop_ioctl_args /* {
1162 		struct vnode *a_vp;
1163 		int  a_command;
1164 		caddr_t  a_data;
1165 		int  a_fflag;
1166 		struct ucred *a_cred;
1167 		struct proc *a_p;
1168 	} */ *ap;
1169 {
1170 	struct vnode *ovp = OTHERVP(ap->a_vp);
1171 
1172 	ap->a_vp = ovp;
1173 	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1174 }
1175 
1176 static int
1177 union_poll(ap)
1178 	struct vop_poll_args /* {
1179 		struct vnode *a_vp;
1180 		int  a_events;
1181 		struct ucred *a_cred;
1182 		struct proc *a_p;
1183 	} */ *ap;
1184 {
1185 	struct vnode *ovp = OTHERVP(ap->a_vp);
1186 
1187 	ap->a_vp = ovp;
1188 	return (VCALL(ovp, VOFFSET(vop_poll), ap));
1189 }
1190 
1191 static int
1192 union_revoke(ap)
1193 	struct vop_revoke_args /* {
1194 		struct vnode *a_vp;
1195 		int a_flags;
1196 		struct proc *a_p;
1197 	} */ *ap;
1198 {
1199 	struct vnode *vp = ap->a_vp;
1200 
1201 	if (UPPERVP(vp))
1202 		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1203 	if (LOWERVP(vp))
1204 		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1205 	vgone(vp);
1206 	return (0);
1207 }
1208 
1209 static int
1210 union_mmap(ap)
1211 	struct vop_mmap_args /* {
1212 		struct vnode *a_vp;
1213 		int  a_fflags;
1214 		struct ucred *a_cred;
1215 		struct proc *a_p;
1216 	} */ *ap;
1217 {
1218 	struct vnode *ovp = OTHERVP(ap->a_vp);
1219 
1220 	ap->a_vp = ovp;
1221 	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
1222 }
1223 
1224 static int
1225 union_fsync(ap)
1226 	struct vop_fsync_args /* {
1227 		struct vnode *a_vp;
1228 		struct ucred *a_cred;
1229 		int  a_waitfor;
1230 		struct proc *a_p;
1231 	} */ *ap;
1232 {
1233 	int error = 0;
1234 	struct proc *p = ap->a_p;
1235 	struct vnode *targetvp;
1236 	struct union_node *un = VTOUNION(ap->a_vp);
1237 
1238 	if ((targetvp = union_lock_other(un, p)) != NULLVP) {
1239 		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p);
1240 		union_unlock_other(targetvp, p);
1241 	}
1242 
1243 	return (error);
1244 }
1245 
1246 /*
1247  *	union_remove:
1248  *
1249  *	Remove the specified cnp.  The dvp and vp are passed to us locked
1250  *	and must remain locked on return.
1251  */
1252 
1253 static int
1254 union_remove(ap)
1255 	struct vop_remove_args /* {
1256 		struct vnode *a_dvp;
1257 		struct vnode *a_vp;
1258 		struct componentname *a_cnp;
1259 	} */ *ap;
1260 {
1261 	struct union_node *dun = VTOUNION(ap->a_dvp);
1262 	struct union_node *un = VTOUNION(ap->a_vp);
1263 	struct componentname *cnp = ap->a_cnp;
1264 	struct proc *p = cnp->cn_proc;
1265 	struct vnode *uppervp;
1266 	struct vnode *upperdvp;
1267 	int error;
1268 
1269 	if ((upperdvp = union_lock_upper(dun, p)) == NULLVP)
1270 		panic("union remove: null upper vnode");
1271 
1272 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1273 		if (union_dowhiteout(un, cnp->cn_cred, p))
1274 			cnp->cn_flags |= DOWHITEOUT;
1275 		error = VOP_REMOVE(upperdvp, uppervp, cnp);
1276 #if 0
1277 		/* XXX */
1278 		if (!error)
1279 			union_removed_upper(un);
1280 #endif
1281 		union_unlock_upper(uppervp, p);
1282 	} else {
1283 		error = union_mkwhiteout(
1284 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1285 			    upperdvp, ap->a_cnp, un->un_path);
1286 	}
1287 	union_unlock_upper(upperdvp, p);
1288 	return (error);
1289 }
1290 
1291 /*
1292  *	union_link:
1293  *
1294  *	tdvp will be locked on entry, vp will not be locked on entry.
1295  *	tdvp should remain locked on return and vp should remain unlocked
1296  *	on return.
1297  */
1298 
1299 static int
1300 union_link(ap)
1301 	struct vop_link_args /* {
1302 		struct vnode *a_tdvp;
1303 		struct vnode *a_vp;
1304 		struct componentname *a_cnp;
1305 	} */ *ap;
1306 {
1307 	struct componentname *cnp = ap->a_cnp;
1308 	struct proc *p = cnp->cn_proc;
1309 	struct union_node *dun = VTOUNION(ap->a_tdvp);
1310 	struct vnode *vp;
1311 	struct vnode *tdvp;
1312 	int error = 0;
1313 
1314 	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
1315 		vp = ap->a_vp;
1316 	} else {
1317 		struct union_node *tun = VTOUNION(ap->a_vp);
1318 
1319 		if (tun->un_uppervp == NULLVP) {
1320 			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
1321 #if 0
1322 			if (dun->un_uppervp == tun->un_dirvp) {
1323 				if (dun->un_flags & UN_ULOCK) {
1324 					dun->un_flags &= ~UN_ULOCK;
1325 					VOP_UNLOCK(dun->un_uppervp, 0, p);
1326 				}
1327 			}
1328 #endif
1329 			error = union_copyup(tun, 1, cnp->cn_cred, p);
1330 #if 0
1331 			if (dun->un_uppervp == tun->un_dirvp) {
1332 				vn_lock(dun->un_uppervp,
1333 					    LK_EXCLUSIVE | LK_RETRY, p);
1334 				dun->un_flags |= UN_ULOCK;
1335 			}
1336 #endif
1337 			VOP_UNLOCK(ap->a_vp, 0, p);
1338 		}
1339 		vp = tun->un_uppervp;
1340 	}
1341 
1342 	if (error)
1343 		return (error);
1344 
1345 	/*
1346 	 * Make sure upper is locked, then unlock the union directory we were
1347 	 * called with to avoid a deadlock while we are calling VOP_LINK on
1348 	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
1349 	 * is expected to be locked on return.
1350 	 */
1351 
1352 	if ((tdvp = union_lock_upper(dun, p)) == NULLVP)
1353 		return (EROFS);
1354 
1355 	VOP_UNLOCK(ap->a_tdvp, 0, p);		/* unlock calling node */
1356 	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */
1357 
1358 	/*
1359 	 * We have to unlock tdvp prior to relocking our calling node in
1360 	 * order to avoid a deadlock.
1361 	 */
1362 	union_unlock_upper(tdvp, p);
1363 	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, p);
1364 	return (error);
1365 }
1366 
/*
 *	union_rename:
 *
 *	Rename within the union by translating every vnode argument to
 *	its upper-layer counterpart and letting the upper filesystem do
 *	the real work.  Source objects that exist only in the lower layer
 *	are copied up first (regular files) or rejected (directories).
 *	VOP_RENAME consumes its references/locks, so on the success path
 *	there is no cleanup to do here; the 'bad' path releases whatever
 *	we still hold.
 */
static int
union_rename(ap)
	struct vop_rename_args  /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				/*
				 * Lower-only regular file: copy it up so
				 * the rename can happen in the upper layer.
				 */
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_proc);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_proc);
				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_proc);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_proc);
				error = union_mkshadow(um, fdvp,
					    ap->a_fcnp, &un->un_uppervp);
				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_proc);
				if (un->un_uppervp)
					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_proc);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		/*
		 * If a lower copy exists it must be masked once the upper
		 * name is renamed away.
		 */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;
		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transfered to what we will pass
	 * down in the VOP_RENAME and we replace uppervp with a simple
	 * reference.
	 */

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * new tdvp is a lock and reference on uppervp, put away
		 * the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_proc);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist put away the (wrong)
	 * file and change tvp to NULL.
	 */

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_proc);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */

bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}
1531 
/*
 *	union_mkdir:
 *
 *	Create a directory in the upper layer (EROFS if no upper layer
 *	exists), then allocate a union vnode wrapping the new upper
 *	directory for the caller.
 */
static int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct vnode *upperdvp;
	int error = EROFS;

	if ((upperdvp = union_lock_upper(dun, p)) != NULLVP) {
		struct vnode *vp;

		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, p);

		if (error == 0) {
			/*
			 * union_allocvp() consumes our reference to vp
			 * and expects it unlocked.
			 */
			VOP_UNLOCK(vp, 0, p);
			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_usecount));
			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
		}
	}
	return (error);
}
1563 
1564 static int
1565 union_rmdir(ap)
1566 	struct vop_rmdir_args /* {
1567 		struct vnode *a_dvp;
1568 		struct vnode *a_vp;
1569 		struct componentname *a_cnp;
1570 	} */ *ap;
1571 {
1572 	struct union_node *dun = VTOUNION(ap->a_dvp);
1573 	struct union_node *un = VTOUNION(ap->a_vp);
1574 	struct componentname *cnp = ap->a_cnp;
1575 	struct proc *p = cnp->cn_proc;
1576 	struct vnode *upperdvp;
1577 	struct vnode *uppervp;
1578 	int error;
1579 
1580 	if ((upperdvp = union_lock_upper(dun, p)) == NULLVP)
1581 		panic("union rmdir: null upper vnode");
1582 
1583 	if ((uppervp = union_lock_upper(un, p)) != NULLVP) {
1584 		if (union_dowhiteout(un, cnp->cn_cred, p))
1585 			cnp->cn_flags |= DOWHITEOUT;
1586 		error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
1587 		union_unlock_upper(uppervp, p);
1588 	} else {
1589 		error = union_mkwhiteout(
1590 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1591 			    dun->un_uppervp, ap->a_cnp, un->un_path);
1592 	}
1593 	union_unlock_upper(upperdvp, p);
1594 	return (error);
1595 }
1596 
1597 /*
1598  *	union_symlink:
1599  *
1600  *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
1601  *	(unused).
1602  */
1603 
1604 static int
1605 union_symlink(ap)
1606 	struct vop_symlink_args /* {
1607 		struct vnode *a_dvp;
1608 		struct vnode **a_vpp;
1609 		struct componentname *a_cnp;
1610 		struct vattr *a_vap;
1611 		char *a_target;
1612 	} */ *ap;
1613 {
1614 	struct union_node *dun = VTOUNION(ap->a_dvp);
1615 	struct componentname *cnp = ap->a_cnp;
1616 	struct proc *p = cnp->cn_proc;
1617 	struct vnode *dvp;
1618 	int error = EROFS;
1619 
1620 	if ((dvp = union_lock_upper(dun, p)) != NULLVP) {
1621 		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1622 			    ap->a_target);
1623 		union_unlock_upper(dvp, p);
1624 	}
1625 	return (error);
1626 }
1627 
1628 /*
1629  * union_readdir works in concert with getdirentries and
1630  * readdir(3) to provide a list of entries in the unioned
1631  * directories.  getdirentries is responsible for walking
1632  * down the union stack.  readdir(3) is responsible for
1633  * eliminating duplicate names from the returned data stream.
1634  */
1635 static int
1636 union_readdir(ap)
1637 	struct vop_readdir_args /* {
1638 		struct vnode *a_vp;
1639 		struct uio *a_uio;
1640 		struct ucred *a_cred;
1641 		int *a_eofflag;
1642 		u_long *a_cookies;
1643 		int a_ncookies;
1644 	} */ *ap;
1645 {
1646 	struct union_node *un = VTOUNION(ap->a_vp);
1647 	struct proc *p = ap->a_uio->uio_procp;
1648 	struct vnode *uvp;
1649 	int error = 0;
1650 
1651 	if ((uvp = union_lock_upper(un, p)) != NULLVP) {
1652 		ap->a_vp = uvp;
1653 		error = VCALL(uvp, VOFFSET(vop_readdir), ap);
1654 		union_unlock_upper(uvp, p);
1655 	}
1656 	return(error);
1657 }
1658 
1659 static int
1660 union_readlink(ap)
1661 	struct vop_readlink_args /* {
1662 		struct vnode *a_vp;
1663 		struct uio *a_uio;
1664 		struct ucred *a_cred;
1665 	} */ *ap;
1666 {
1667 	int error;
1668 	struct union_node *un = VTOUNION(ap->a_vp);
1669 	struct uio *uio = ap->a_uio;
1670 	struct proc *p = uio->uio_procp;
1671 	struct vnode *vp;
1672 
1673 	vp = union_lock_other(un, p);
1674 	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1675 
1676 	ap->a_vp = vp;
1677 	error = VCALL(vp, VOFFSET(vop_readlink), ap);
1678 	union_unlock_other(vp, p);
1679 
1680 	return (error);
1681 }
1682 
1683 /*
1684  *	union_inactive:
1685  *
1686  *	Called with the vnode locked.  We are expected to unlock the vnode.
1687  */
1688 
1689 static int
1690 union_inactive(ap)
1691 	struct vop_inactive_args /* {
1692 		struct vnode *a_vp;
1693 		struct proc *a_p;
1694 	} */ *ap;
1695 {
1696 	struct vnode *vp = ap->a_vp;
1697 	struct proc *p = ap->a_p;
1698 	struct union_node *un = VTOUNION(vp);
1699 	struct vnode **vpp;
1700 
1701 	/*
1702 	 * Do nothing (and _don't_ bypass).
1703 	 * Wait to vrele lowervp until reclaim,
1704 	 * so that until then our union_node is in the
1705 	 * cache and reusable.
1706 	 *
1707 	 * NEEDSWORK: Someday, consider inactive'ing
1708 	 * the lowervp and then trying to reactivate it
1709 	 * with capabilities (v_id)
1710 	 * like they do in the name lookup cache code.
1711 	 * That's too much work for now.
1712 	 */
1713 
1714 	if (un->un_dircache != 0) {
1715 		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1716 			vrele(*vpp);
1717 		free (un->un_dircache, M_TEMP);
1718 		un->un_dircache = 0;
1719 	}
1720 
1721 #if 0
1722 	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
1723 		un->un_flags &= ~UN_ULOCK;
1724 		VOP_UNLOCK(un->un_uppervp, 0, p);
1725 	}
1726 #endif
1727 
1728 	VOP_UNLOCK(vp, 0, p);
1729 
1730 	if ((un->un_flags & UN_CACHED) == 0)
1731 		vgone(vp);
1732 
1733 	return (0);
1734 }
1735 
1736 static int
1737 union_reclaim(ap)
1738 	struct vop_reclaim_args /* {
1739 		struct vnode *a_vp;
1740 	} */ *ap;
1741 {
1742 	union_freevp(ap->a_vp);
1743 
1744 	return (0);
1745 }
1746 
/*
 *	union_lock:
 *
 *	Lock the union node itself via the standard lock.  The historical
 *	code that also acquired the upper vnode's lock (UN_ULOCK) is
 *	preserved below under #if 0 for reference.
 */
static int
union_lock(ap)
	struct vop_lock_args *ap;
{
#if 0
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	int flags = ap->a_flags;
	struct union_node *un;
#endif
	int error;

	error = vop_stdlock(ap);
#if 0
	un = VTOUNION(vp);

	if (error == 0) {
		/*
		 * Lock the upper if it exists and this is an exclusive lock
		 * request.
		 */
		if (un->un_uppervp != NULLVP &&
		    (flags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
			if ((un->un_flags & UN_ULOCK) == 0 && vp->v_usecount) {
				error = vn_lock(un->un_uppervp, flags, p);
				if (error) {
					struct vop_unlock_args uap = { 0 };
					uap.a_vp = ap->a_vp;
					uap.a_flags = ap->a_flags;
					uap.a_p = ap->a_p;
					vop_stdunlock(&uap);
					return (error);
				}
				un->un_flags |= UN_ULOCK;
			}
		}
	}
#endif
	return (error);
}
1787 
1788 /*
1789  *	union_unlock:
1790  *
1791  *	Unlock our union node.  This also unlocks uppervp.
1792  */
1793 static int
1794 union_unlock(ap)
1795 	struct vop_unlock_args /* {
1796 		struct vnode *a_vp;
1797 		int a_flags;
1798 		struct proc *a_p;
1799 	} */ *ap;
1800 {
1801 	struct union_node *un = VTOUNION(ap->a_vp);
1802 	int error;
1803 
1804 	KASSERT((un->un_uppervp == NULL || un->un_uppervp->v_usecount > 0), ("uppervp usecount is 0"));
1805 
1806 	error = vop_stdunlock(ap);
1807 #if 0
1808 
1809 	/*
1810 	 * If no exclusive locks remain and we are holding an uppervp lock,
1811 	 * remove the uppervp lock.
1812 	 */
1813 
1814 	if ((un->un_flags & UN_ULOCK) &&
1815 	    lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
1816 		un->un_flags &= ~UN_ULOCK;
1817 		VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, p);
1818 	}
1819 #endif
1820 	return(error);
1821 }
1822 
1823 /*
1824  *	union_bmap:
1825  *
1826  *	There isn't much we can do.  We cannot push through to the real vnode
1827  *	to get to the underlying device because this will bypass data
1828  *	cached by the real vnode.
1829  *
1830  *	For some reason we cannot return the 'real' vnode either, it seems
1831  *	to blow up memory maps.
1832  */
1833 
1834 static int
1835 union_bmap(ap)
1836 	struct vop_bmap_args /* {
1837 		struct vnode *a_vp;
1838 		daddr_t  a_bn;
1839 		struct vnode **a_vpp;
1840 		daddr_t *a_bnp;
1841 		int *a_runp;
1842 		int *a_runb;
1843 	} */ *ap;
1844 {
1845 	return(EOPNOTSUPP);
1846 }
1847 
1848 static int
1849 union_print(ap)
1850 	struct vop_print_args /* {
1851 		struct vnode *a_vp;
1852 	} */ *ap;
1853 {
1854 	struct vnode *vp = ap->a_vp;
1855 
1856 	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1857 			vp, UPPERVP(vp), LOWERVP(vp));
1858 	if (UPPERVP(vp) != NULLVP)
1859 		vprint("union: upper", UPPERVP(vp));
1860 	if (LOWERVP(vp) != NULLVP)
1861 		vprint("union: lower", LOWERVP(vp));
1862 
1863 	return (0);
1864 }
1865 
1866 static int
1867 union_pathconf(ap)
1868 	struct vop_pathconf_args /* {
1869 		struct vnode *a_vp;
1870 		int a_name;
1871 		int *a_retval;
1872 	} */ *ap;
1873 {
1874 	int error;
1875 	struct proc *p = curproc;		/* XXX */
1876 	struct union_node *un = VTOUNION(ap->a_vp);
1877 	struct vnode *vp;
1878 
1879 	vp = union_lock_other(un, p);
1880 	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1881 
1882 	ap->a_vp = vp;
1883 	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1884 	union_unlock_other(vp, p);
1885 
1886 	return (error);
1887 }
1888 
1889 static int
1890 union_advlock(ap)
1891 	struct vop_advlock_args /* {
1892 		struct vnode *a_vp;
1893 		caddr_t  a_id;
1894 		int  a_op;
1895 		struct flock *a_fl;
1896 		int  a_flags;
1897 	} */ *ap;
1898 {
1899 	register struct vnode *ovp = OTHERVP(ap->a_vp);
1900 
1901 	ap->a_vp = ovp;
1902 	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1903 }
1904 
1905 
1906 /*
1907  * XXX - vop_strategy must be hand coded because it has no
1908  * YYY - and it is not coherent with anything
1909  *
1910  * vnode in its arguments.
1911  * This goes away with a merged VM/buffer cache.
1912  */
1913 static int
1914 union_strategy(ap)
1915 	struct vop_strategy_args /* {
1916 		struct vnode *a_vp;
1917 		struct buf *a_bp;
1918 	} */ *ap;
1919 {
1920 	struct buf *bp = ap->a_bp;
1921 	struct vnode *othervp = OTHERVP(bp->b_vp);
1922 
1923 #ifdef DIAGNOSTIC
1924 	if (othervp == NULLVP)
1925 		panic("union_strategy: nil vp");
1926 	if (((bp->b_flags & B_READ) == 0) &&
1927 	    (othervp == LOWERVP(bp->b_vp)))
1928 		panic("union_strategy: writing to lowervp");
1929 #endif
1930 	return (VOP_STRATEGY(othervp, bp));
1931 }
1932 
1933 /*
1934  * Global vfs data structures
1935  */
1936 vop_t **union_vnodeop_p;
1937 static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
1938 	{ &vop_default_desc,		(vop_t *) vop_defaultop },
1939 	{ &vop_access_desc,		(vop_t *) union_access },
1940 	{ &vop_advlock_desc,		(vop_t *) union_advlock },
1941 	{ &vop_bmap_desc,		(vop_t *) union_bmap },
1942 	{ &vop_close_desc,		(vop_t *) union_close },
1943 	{ &vop_create_desc,		(vop_t *) union_create },
1944 	{ &vop_fsync_desc,		(vop_t *) union_fsync },
1945 	{ &vop_getpages_desc,		(vop_t *) union_getpages },
1946 	{ &vop_putpages_desc,		(vop_t *) union_putpages },
1947 	{ &vop_getattr_desc,		(vop_t *) union_getattr },
1948 	{ &vop_inactive_desc,		(vop_t *) union_inactive },
1949 	{ &vop_ioctl_desc,		(vop_t *) union_ioctl },
1950 	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
1951 	{ &vop_lease_desc,		(vop_t *) union_lease },
1952 	{ &vop_link_desc,		(vop_t *) union_link },
1953 	{ &vop_lock_desc,		(vop_t *) union_lock },
1954 	{ &vop_lookup_desc,		(vop_t *) union_lookup },
1955 	{ &vop_mkdir_desc,		(vop_t *) union_mkdir },
1956 	{ &vop_mknod_desc,		(vop_t *) union_mknod },
1957 	{ &vop_mmap_desc,		(vop_t *) union_mmap },
1958 	{ &vop_open_desc,		(vop_t *) union_open },
1959 	{ &vop_pathconf_desc,		(vop_t *) union_pathconf },
1960 	{ &vop_poll_desc,		(vop_t *) union_poll },
1961 	{ &vop_print_desc,		(vop_t *) union_print },
1962 	{ &vop_read_desc,		(vop_t *) union_read },
1963 	{ &vop_readdir_desc,		(vop_t *) union_readdir },
1964 	{ &vop_readlink_desc,		(vop_t *) union_readlink },
1965 	{ &vop_reclaim_desc,		(vop_t *) union_reclaim },
1966 	{ &vop_remove_desc,		(vop_t *) union_remove },
1967 	{ &vop_rename_desc,		(vop_t *) union_rename },
1968 	{ &vop_revoke_desc,		(vop_t *) union_revoke },
1969 	{ &vop_rmdir_desc,		(vop_t *) union_rmdir },
1970 	{ &vop_setattr_desc,		(vop_t *) union_setattr },
1971 	{ &vop_strategy_desc,		(vop_t *) union_strategy },
1972 	{ &vop_symlink_desc,		(vop_t *) union_symlink },
1973 	{ &vop_unlock_desc,		(vop_t *) union_unlock },
1974 	{ &vop_whiteout_desc,		(vop_t *) union_whiteout },
1975 	{ &vop_write_desc,		(vop_t *) union_write },
1976 	{ NULL, NULL }
1977 };
1978 static struct vnodeopv_desc union_vnodeop_opv_desc =
1979 	{ &union_vnodeop_p, union_vnodeop_entries };
1980 
1981 VNODEOP_SET(union_vnodeop_opv_desc);
1982