xref: /freebsd/sys/fs/unionfs/union_vnops.c (revision 17d6c636720d00f77e5d098daf4c278f89d84f7b)
1 /*
2  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3  * Copyright (c) 1992, 1993, 1994, 1995
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
38  * $FreeBSD$
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/fcntl.h>
44 #include <sys/stat.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/malloc.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/lock.h>
53 #include <sys/sysctl.h>
54 #include <fs/unionfs/union.h>
55 
56 #include <vm/vm.h>
57 #include <vm/vnode_pager.h>
58 
59 #include <vm/vm_page.h>
60 #include <vm/vm_object.h>
61 
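/*
 * uniondebug gates the UDEBUG() trace printfs in this file.  When the
 * kernel is built with UDEBUG_ENABLED the knob is writable at run time,
 * e.g. from userland:
 *
 *	sysctl vfs.uniondebug=1
 */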
62 int uniondebug = 0;
63 
64 #if UDEBUG_ENABLED
65 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
66 #else
67 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
68 #endif
69 
70 static int	union_access __P((struct vop_access_args *ap));
71 static int	union_advlock __P((struct vop_advlock_args *ap));
72 static int	union_close __P((struct vop_close_args *ap));
73 static int	union_create __P((struct vop_create_args *ap));
74 static int	union_createvobject __P((struct vop_createvobject_args *ap));
75 static int	union_destroyvobject __P((struct vop_destroyvobject_args *ap));
76 static int	union_fsync __P((struct vop_fsync_args *ap));
77 static int	union_getattr __P((struct vop_getattr_args *ap));
78 static int	union_getvobject __P((struct vop_getvobject_args *ap));
79 static int	union_inactive __P((struct vop_inactive_args *ap));
80 static int	union_ioctl __P((struct vop_ioctl_args *ap));
81 static int	union_lease __P((struct vop_lease_args *ap));
82 static int	union_link __P((struct vop_link_args *ap));
83 static int	union_lock __P((struct vop_lock_args *ap));
84 static int	union_lookup __P((struct vop_lookup_args *ap));
85 static int	union_lookup1 __P((struct vnode *udvp, struct vnode **dvp,
86 				   struct vnode **vpp,
87 				   struct componentname *cnp));
88 static int	union_mkdir __P((struct vop_mkdir_args *ap));
89 static int	union_mknod __P((struct vop_mknod_args *ap));
90 static int	union_open __P((struct vop_open_args *ap));
91 static int	union_pathconf __P((struct vop_pathconf_args *ap));
92 static int	union_print __P((struct vop_print_args *ap));
93 static int	union_read __P((struct vop_read_args *ap));
94 static int	union_readdir __P((struct vop_readdir_args *ap));
95 static int	union_readlink __P((struct vop_readlink_args *ap));
96 static int	union_getwritemount __P((struct vop_getwritemount_args *ap));
97 static int	union_reclaim __P((struct vop_reclaim_args *ap));
98 static int	union_remove __P((struct vop_remove_args *ap));
99 static int	union_rename __P((struct vop_rename_args *ap));
100 static int	union_revoke __P((struct vop_revoke_args *ap));
101 static int	union_rmdir __P((struct vop_rmdir_args *ap));
102 static int	union_poll __P((struct vop_poll_args *ap));
103 static int	union_setattr __P((struct vop_setattr_args *ap));
104 static int	union_strategy __P((struct vop_strategy_args *ap));
105 static int	union_symlink __P((struct vop_symlink_args *ap));
106 static int	union_unlock __P((struct vop_unlock_args *ap));
107 static int	union_whiteout __P((struct vop_whiteout_args *ap));
108 static int	union_write __P((struct vop_read_args *ap));
109 
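/*
 * Locking helpers.
 *
 * union_lock_upper() returns the union node's upper vnode, referenced
 * and exclusively locked, or NULL if there is no upper layer.
 * union_lock_other() prefers the upper vnode and falls back to the
 * lower one.  Each is undone by the matching unlock function, whose
 * vput() drops both the lock and the reference taken here.
 */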
110 static __inline
111 struct vnode *
112 union_lock_upper(struct union_node *un, struct thread *td)
113 {
114 	struct vnode *uppervp;
115 
116 	if ((uppervp = un->un_uppervp) != NULL) {
117 		VREF(uppervp);
118 		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
119 	}
120 	KASSERT((uppervp == NULL || uppervp->v_usecount > 0), ("uppervp usecount is 0"));
121 	return(uppervp);
122 }
123 
124 static __inline
125 void
126 union_unlock_upper(struct vnode *uppervp, struct thread *td)
127 {
128 	vput(uppervp);
129 }
130 
131 static __inline
132 struct vnode *
133 union_lock_other(struct union_node *un, struct thread *td)
134 {
135 	struct vnode *vp;
136 
137 	if (un->un_uppervp != NULL) {
138 		vp = union_lock_upper(un, td);
139 	} else if ((vp = un->un_lowervp) != NULL) {
140 		VREF(vp);
141 		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
142 	}
143 	return(vp);
144 }
145 
146 static __inline
147 void
148 union_unlock_other(struct vnode *vp, struct thread *td)
149 {
150 	vput(vp);
151 }
152 
153 /*
154  *	union_lookup:
155  *
156  *	udvp	must be exclusively locked on call and will remain
157  *		exclusively locked on return.  This is the mount point
158  *		for our filesystem.
159  *
160  *	dvp	Our base directory, locked and referenced.
161  *		The passed dvp will be dereferenced and unlocked on return
162  *		and a new dvp will be returned which is locked and
163  *		referenced in the same variable.
164  *
165  *	vpp	is filled in with the result if no error occurred,
166  *		locked and ref'd.
167  *
168  *		If an error is returned, *vpp is set to NULLVP.  If no
169  *		error occurs, *vpp is returned with a reference and an
170  *		exclusive lock.
171  */
172 
173 static int
174 union_lookup1(udvp, pdvp, vpp, cnp)
175 	struct vnode *udvp;
176 	struct vnode **pdvp;
177 	struct vnode **vpp;
178 	struct componentname *cnp;
179 {
180 	int error;
181 	struct thread *td = cnp->cn_thread;
182 	struct vnode *dvp = *pdvp;
183 	struct vnode *tdvp;
184 	struct mount *mp;
185 
186 	/*
187 	 * If stepping up the directory tree, check for going
188 	 * back across the mount point, in which case do what
189 	 * lookup would do by stepping back down the mount
190 	 * hierarchy.
191 	 */
192 	if (cnp->cn_flags & ISDOTDOT) {
193 		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
194 			/*
195 			 * Don't do the NOCROSSMOUNT check
196 			 * at this level.  By definition,
197 			 * union fs deals with namespaces, not
198 			 * filesystems.
199 			 */
200 			tdvp = dvp;
201 			dvp = dvp->v_mount->mnt_vnodecovered;
202 			VREF(dvp);
203 			vput(tdvp);
204 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
205 		}
206 	}
207 
208 	/*
209 	 * Set the return dvp to be the upperdvp ('parent directory').
210 	 */
211 	*pdvp = dvp;
212 
213 	/*
214 	 * If the VOP_LOOKUP call generates an error, tdvp is invalid and no
215 	 * changes will have been made to dvp, so we are set to return.
216 	 */
217 
218 	error = VOP_LOOKUP(dvp, &tdvp, cnp);
219 	if (error) {
220 		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
221 		*vpp = NULL;
222 		return (error);
223 	}
224 
225 	/*
226 	 * The parent directory will have been unlocked, unless lookup
227 	 * found the last component or if dvp == tdvp (tdvp must be locked).
228 	 *
229 	 * We want our dvp to remain locked and ref'd.  We also want tdvp
230 	 * to remain locked and ref'd.
231 	 */
232 	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));
233 
234 	if (dvp != tdvp && (cnp->cn_flags & ISLASTCN) == 0)
235 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
236 
237 	/*
238 	 * Lastly check if the current node is a mount point in
239 	 * which case walk up the mount hierarchy making sure not to
240 	 * bump into the root of the mount tree (ie. dvp != udvp).
241 	 *
242 	 * We use dvp as a temporary variable here, it is no longer related
243 	 * to the dvp above.  However, we have to ensure that both *pdvp and
244 	 * tdvp are locked on return.
245 	 */
246 
247 	dvp = tdvp;
248 	while (
249 	    dvp != udvp &&
250 	    (dvp->v_type == VDIR) &&
251 	    (mp = dvp->v_mountedhere)
252 	) {
253 		int relock_pdvp = 0;
254 
255 		if (vfs_busy(mp, 0, 0, td))
256 			continue;
257 
258 		if (dvp == *pdvp)
259 			relock_pdvp = 1;
260 		vput(dvp);
261 		dvp = NULL;
262 		error = VFS_ROOT(mp, &dvp);
263 
264 		vfs_unbusy(mp, td);
265 
266 		if (relock_pdvp)
267 			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, td);
268 
269 		if (error) {
270 			*vpp = NULL;
271 			return (error);
272 		}
273 	}
274 	*vpp = dvp;
275 	return (0);
276 }
277 
278 static int
279 union_lookup(ap)
280 	struct vop_lookup_args /* {
281 		struct vnodeop_desc *a_desc;
282 		struct vnode *a_dvp;
283 		struct vnode **a_vpp;
284 		struct componentname *a_cnp;
285 	} */ *ap;
286 {
287 	int error;
288 	int uerror, lerror;
289 	struct vnode *uppervp, *lowervp;
290 	struct vnode *upperdvp, *lowerdvp;
291 	struct vnode *dvp = ap->a_dvp;		/* starting dir */
292 	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
293 	struct componentname *cnp = ap->a_cnp;
294 	struct thread *td = cnp->cn_thread;
295 	int lockparent = cnp->cn_flags & LOCKPARENT;
296 	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
297 	struct ucred *saved_cred = NULL;
298 	int iswhiteout;
299 	struct vattr va;
300 
301 	*ap->a_vpp = NULLVP;
302 
303 	/*
304 	 * Disallow write attempts on a filesystem mounted read-only.
305 	 */
306 	if ((cnp->cn_flags & ISLASTCN) &&
307 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
308 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
309 		return (EROFS);
310 	}
311 
312 	/*
313 	 * For any lookup's we do, always return with the parent locked
314 	 */
315 	cnp->cn_flags |= LOCKPARENT;
316 
317 	lowerdvp = dun->un_lowervp;
318 	uppervp = NULLVP;
319 	lowervp = NULLVP;
320 	iswhiteout = 0;
321 
322 	uerror = ENOENT;
323 	lerror = ENOENT;
324 
325 	/*
326 	 * Get a private lock on uppervp and a reference, effectively
327 	 * taking it out of the union_node's control.
328 	 *
329 	 * We must lock upperdvp while holding our lock on dvp
330 	 * to avoid a deadlock.
331 	 */
332 	upperdvp = union_lock_upper(dun, td);
333 
334 	/*
335 	 * do the lookup in the upper level.
336 	 * if that level consumes additional pathnames,
337 	 * then assume that something special is going
338 	 * on and just return that vnode.
339 	 */
340 	if (upperdvp != NULLVP) {
341 		/*
342 		 * We do not have to worry about the DOTDOT case, we've
343 		 * already unlocked dvp.
344 		 */
345 		UDEBUG(("A %p\n", upperdvp));
346 
347 		/*
348 		 * Do the lookup.   We must supply a locked and referenced
349 		 * upperdvp to the function and will get a new locked and
350 		 * referenced upperdvp back with the old having been
351 		 * dereferenced.
352 		 *
353 		 * If an error is returned, uppervp will be NULLVP.  If no
354 		 * error occurs, uppervp will be the locked and referenced
355 		 * return vnode or possibly NULL, depending on what is being
356 		 * requested.  It is possible that the returned uppervp
357 		 * will be the same as upperdvp.
358 		 */
359 		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
360 		UDEBUG((
361 		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
362 		    uerror,
363 		    upperdvp,
364 		    upperdvp->v_usecount,
365 		    VOP_ISLOCKED(upperdvp, NULL),
366 		    uppervp,
367 		    (uppervp ? uppervp->v_usecount : -99),
368 		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
369 		));
370 
371 		/*
372 		 * Disallow write attempts on a filesystem mounted read-only.
373 		 */
374 		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
375 		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
376 		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
377 			error = EROFS;
378 			goto out;
379 		}
380 
381 		/*
382 		 * Special case.  If cn_consume != 0 skip out.  The result
383 		 * of the lookup is transferred to our return variable.  If
384 		 * an error occurred we have to throw away the results.
385 		 */
386 
387 		if (cnp->cn_consume != 0) {
388 			if ((error = uerror) == 0) {
389 				*ap->a_vpp = uppervp;
390 				uppervp = NULL;
391 			}
392 			goto out;
393 		}
394 
395 		/*
396 		 * Calculate whiteout, fall through
397 		 */
398 
399 		if (uerror == ENOENT || uerror == EJUSTRETURN) {
400 			if (cnp->cn_flags & ISWHITEOUT) {
401 				iswhiteout = 1;
402 			} else if (lowerdvp != NULLVP) {
403 				int terror;
404 
405 				terror = VOP_GETATTR(upperdvp, &va,
406 					cnp->cn_cred, cnp->cn_thread);
407 				if (terror == 0 && (va.va_flags & OPAQUE))
408 					iswhiteout = 1;
409 			}
410 		}
411 	}
412 
413 	/*
414 	 * in a similar way to the upper layer, do the lookup
415 	 * in the lower layer.   this time, if there is some
416 	 * component magic going on, then vput whatever we got
417 	 * back from the upper layer and return the lower vnode
418 	 * instead.
419 	 */
420 
421 	if (lowerdvp != NULLVP && !iswhiteout) {
422 		int nameiop;
423 
424 		UDEBUG(("B %p\n", lowerdvp));
425 
426 		/*
427 		 * Force only LOOKUPs on the lower node, since
428 		 * we won't be making changes to it anyway.
429 		 */
430 		nameiop = cnp->cn_nameiop;
431 		cnp->cn_nameiop = LOOKUP;
432 		if (um->um_op == UNMNT_BELOW) {
433 			saved_cred = cnp->cn_cred;
434 			cnp->cn_cred = um->um_cred;
435 		}
436 
437 		/*
438 		 * We shouldn't have to worry about locking interactions
439 		 * between the lower layer and our union layer (w.r.t.
440 		 * `..' processing) because we don't futz with lowervp
441 		 * locks in the union-node instantiation code path.
442 		 *
443 		 * union_lookup1() requires lowerdvp to be locked on entry,
444 		 * and it will be unlocked on return.  The ref count will
445 		 * not change.  On return lowerdvp doesn't represent anything
446 		 * to us so we NULL it out.
447 		 */
448 		VREF(lowerdvp);
449 		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, td);
450 		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
451 		if (lowerdvp == lowervp)
452 			vrele(lowerdvp);
453 		else
454 			vput(lowerdvp);
455 		lowerdvp = NULL;	/* lowerdvp invalid after vput */
456 
457 		if (um->um_op == UNMNT_BELOW)
458 			cnp->cn_cred = saved_cred;
459 		cnp->cn_nameiop = nameiop;
460 
461 		if (cnp->cn_consume != 0 || lerror == EACCES) {
462 			if ((error = lerror) == 0) {
463 				*ap->a_vpp = lowervp;
464 				lowervp = NULL;
465 			}
466 			goto out;
467 		}
468 	} else {
469 		UDEBUG(("C %p\n", lowerdvp));
470 		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
471 			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
472 				VREF(lowervp);
473 				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, td);
474 				lerror = 0;
475 			}
476 		}
477 	}
478 
479 	/*
480 	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
481 	 *
482 	 * 1. If both layers returned an error, select the upper layer.
483 	 *
484 	 * 2. If the upper layer failed and the bottom layer succeeded,
485 	 *    two subcases occur:
486 	 *
487 	 *	a.  The bottom vnode is not a directory, in which case
488 	 *	    just return a new union vnode referencing an
489 	 *	    empty top layer and the existing bottom layer.
490 	 *
491 	 *	b.  The bottom vnode is a directory, in which case
492 	 *	    create a new directory in the top layer and
493 	 *	    fall through to case 3.
494 	 *
495 	 * 3. If the top layer succeeded then return a new union
496 	 *    vnode referencing whatever the new top layer and
497 	 *    whatever the bottom layer returned.
498 	 */
499 
500 	/* case 1. */
501 	if ((uerror != 0) && (lerror != 0)) {
502 		error = uerror;
503 		goto out;
504 	}
505 
506 	/* case 2. */
507 	if (uerror != 0 /* && (lerror == 0) */ ) {
508 		if (lowervp->v_type == VDIR) { /* case 2b. */
509 			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
510 			/*
511 			 * oops, uppervp has a problem, we may have to shadow.
512 			 */
513 			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
514 			if (uerror) {
515 				error = uerror;
516 				goto out;
517 			}
518 		}
519 	}
520 
521 	/*
522 	 * Must call union_allocvp with both the upper and lower vnodes
523 	 * referenced and the upper vnode locked.   ap->a_vpp is returned
524 	 * referenced and locked.  lowervp, uppervp, and upperdvp are
525 	 * absorbed by union_allocvp() whether it succeeds or fails.
526 	 *
527 	 * upperdvp is the parent directory of uppervp which may be
528 	 * different, depending on the path, from dun->un_uppervp.  That's
529 	 * why it is a separate argument.  Note that it must be unlocked.
530 	 *
531 	 * dvp must be locked on entry to the call and will be locked on
532 	 * return.
533 	 */
534 
535 	if (uppervp && uppervp != upperdvp)
536 		VOP_UNLOCK(uppervp, 0, td);
537 	if (lowervp)
538 		VOP_UNLOCK(lowervp, 0, td);
539 	if (upperdvp)
540 		VOP_UNLOCK(upperdvp, 0, td);
541 
542 	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
543 			      uppervp, lowervp, 1);
544 
545 	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? ((*ap->a_vpp)->v_usecount) : -99));
546 
547 	uppervp = NULL;
548 	upperdvp = NULL;
549 	lowervp = NULL;
550 
551 	/*
552 	 *	Termination Code
553 	 *
554 	 *	- put away any extra junk lying around.  Note that lowervp
555 	 *	  (if not NULL) will never be the same as *ap->a_vpp and
556 	 *	  neither will uppervp, because when we set that state we
557 	 *	  NULL-out lowervp or uppervp.  On the other hand, upperdvp
558 	 *	  may match uppervp or *ap->a_vpp.
559 	 *
560 	 *	- relock/unlock dvp if appropriate.
561 	 */
562 
563 out:
564 	if (upperdvp) {
565 		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
566 			vrele(upperdvp);
567 		else
568 			vput(upperdvp);
569 	}
570 
571 	if (uppervp)
572 		vput(uppervp);
573 
574 	if (lowervp)
575 		vput(lowervp);
576 
577 	/*
578 	 * Restore LOCKPARENT state
579 	 */
580 
581 	if (!lockparent)
582 		cnp->cn_flags &= ~LOCKPARENT;
583 
584 	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
585 		((*ap->a_vpp) ? (*ap->a_vpp)->v_usecount : -99),
586 		lowervp, uppervp));
587 
588 	/*
589 	 * dvp lock state, determine whether to relock dvp.  dvp is expected
590 	 * to be locked on return if:
591 	 *
592 	 *	- there was an error (other than EJUSTRETURN), or
593 	 *	- we hit the last component and lockparent is true
594 	 *
595 	 * dvp_is_locked is the current state of the dvp lock, not counting
596 	 * the possibility that *ap->a_vpp == dvp (in which case it is locked
597 	 * anyway).  Note that *ap->a_vpp == dvp only if no error occurred.
598 	 */
599 
600 	if (*ap->a_vpp != dvp) {
601 		if ((error == 0 || error == EJUSTRETURN) &&
602 		    (!lockparent || (cnp->cn_flags & ISLASTCN) == 0)) {
603 			VOP_UNLOCK(dvp, 0, td);
604 		}
605 	}
606 
607 	/*
608 	 * Diagnostics
609 	 */
610 
611 #ifdef DIAGNOSTIC
612 	if (cnp->cn_namelen == 1 &&
613 	    cnp->cn_nameptr[0] == '.' &&
614 	    *ap->a_vpp != dvp) {
615 		panic("union_lookup returning . (%p) not same as startdir (%p)", *ap->a_vpp, dvp);
616 	}
617 #endif
618 
619 	return (error);
620 }
621 
622 /*
623  * 	union_create:
624  *
625  * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
626  * locked if no error occurs, otherwise it is garbage.
627  */
628 
629 static int
630 union_create(ap)
631 	struct vop_create_args /* {
632 		struct vnode *a_dvp;
633 		struct vnode **a_vpp;
634 		struct componentname *a_cnp;
635 		struct vattr *a_vap;
636 	} */ *ap;
637 {
638 	struct union_node *dun = VTOUNION(ap->a_dvp);
639 	struct componentname *cnp = ap->a_cnp;
640 	struct thread *td = cnp->cn_thread;
641 	struct vnode *dvp;
642 	int error = EROFS;
643 
644 	if ((dvp = union_lock_upper(dun, td)) != NULL) {
645 		struct vnode *vp;
646 		struct mount *mp;
647 
648 		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
649 		if (error == 0) {
650 			mp = ap->a_dvp->v_mount;
651 			VOP_UNLOCK(vp, 0, td);
652 			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_usecount));
653 			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
654 				cnp, vp, NULLVP, 1);
655 			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
656 		}
657 		union_unlock_upper(dvp, td);
658 	}
659 	return (error);
660 }
661 
662 static int
663 union_whiteout(ap)
664 	struct vop_whiteout_args /* {
665 		struct vnode *a_dvp;
666 		struct componentname *a_cnp;
667 		int a_flags;
668 	} */ *ap;
669 {
670 	struct union_node *un = VTOUNION(ap->a_dvp);
671 	struct componentname *cnp = ap->a_cnp;
672 	struct vnode *uppervp;
673 	int error = EOPNOTSUPP;
674 
675 	if ((uppervp = union_lock_upper(un, cnp->cn_thread)) != NULLVP) {
676 		error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
677 		union_unlock_upper(uppervp, cnp->cn_thread);
678 	}
679 	return(error);
680 }
681 
682 /*
683  * 	union_mknod:
684  *
685  *	a_dvp is locked on entry and should remain locked on return.
686  *	a_vpp is garbage whether an error occurs or not.
687  */
688 
689 static int
690 union_mknod(ap)
691 	struct vop_mknod_args /* {
692 		struct vnode *a_dvp;
693 		struct vnode **a_vpp;
694 		struct componentname *a_cnp;
695 		struct vattr *a_vap;
696 	} */ *ap;
697 {
698 	struct union_node *dun = VTOUNION(ap->a_dvp);
699 	struct componentname *cnp = ap->a_cnp;
700 	struct vnode *dvp;
701 	int error = EROFS;
702 
703 	if ((dvp = union_lock_upper(dun, cnp->cn_thread)) != NULL) {
704 		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
705 		union_unlock_upper(dvp, cnp->cn_thread);
706 	}
707 	return (error);
708 }
709 
710 /*
711  *	union_open:
712  *
713  *	Run the open VOP.  When opening the underlying vnode we have to mimic
714  *	vn_open.  What we *really* need to do to avoid screwups if the
715  *	open semantics change is to call vn_open().  For example, ufs blows
716  *	up if you open a file but do not vmio it prior to writing.
717  */
718 
719 static int
720 union_open(ap)
721 	struct vop_open_args /* {
722 		struct vnodeop_desc *a_desc;
723 		struct vnode *a_vp;
724 		int a_mode;
725 		struct ucred *a_cred;
726 		struct thread *a_td;
727 	} */ *ap;
728 {
729 	struct union_node *un = VTOUNION(ap->a_vp);
730 	struct vnode *tvp;
731 	int mode = ap->a_mode;
732 	struct ucred *cred = ap->a_cred;
733 	struct thread *td = ap->a_td;
734 	int error = 0;
735 	int tvpisupper = 1;
736 
737 	/*
738 	 * If there is an existing upper vp then simply open that.
739 	 * The upper vp takes precedence over the lower vp.  When opening
740 	 * a lower vp for writing copy it to the uppervp and then open the
741 	 * uppervp.
742 	 *
743 	 * At the end of this section tvp will be left locked.
744 	 */
745 	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
746 		/*
747 		 * If the lower vnode is being opened for writing, then
748 		 * copy the file contents to the upper vnode and open that,
749 		 * otherwise can simply open the lower vnode.
750 		 */
751 		tvp = un->un_lowervp;
752 		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
753 			int docopy = !(mode & O_TRUNC);
754 			error = union_copyup(un, docopy, cred, td);
755 			tvp = union_lock_upper(un, td);
756 		} else {
757 			un->un_openl++;
758 			VREF(tvp);
759 			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
760 			tvpisupper = 0;
761 		}
762 	}
763 
764 	/*
765 	 * We are holding the correct vnode, open it
766 	 */
767 
768 	if (error == 0)
769 		error = VOP_OPEN(tvp, mode, cred, td);
770 
771 	/*
772 	 * Absolutely necessary or UFS will blowup
773 	 */
774 	if (error == 0 && vn_canvmio(tvp) == TRUE) {
775 		error = vfs_object_create(tvp, td, cred);
776 	}
777 
778 	/*
779 	 * Release any locks held
780 	 */
781 	if (tvpisupper) {
782 		if (tvp)
783 			union_unlock_upper(tvp, td);
784 	} else {
785 		vput(tvp);
786 	}
787 	return (error);
788 }
789 
790 /*
791  *	union_close:
792  *
793  *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
794  *	the case, we do not change it.
795  */
796 
797 static int
798 union_close(ap)
799 	struct vop_close_args /* {
800 		struct vnode *a_vp;
801 		int  a_fflag;
802 		struct ucred *a_cred;
803 		struct thread *a_td;
804 	} */ *ap;
805 {
806 	struct union_node *un = VTOUNION(ap->a_vp);
807 	struct vnode *vp;
808 
809 	if ((vp = un->un_uppervp) == NULLVP) {
810 #ifdef UNION_DIAGNOSTIC
811 		if (un->un_openl <= 0)
812 			panic("union: un_openl cnt");
813 #endif
814 		--un->un_openl;
815 		vp = un->un_lowervp;
816 	}
817 	ap->a_vp = vp;
818 	return (VCALL(vp, VOFFSET(vop_close), ap));
819 }
820 
821 /*
822  * Check access permission on the union vnode.
823  * The access check being enforced is to check
824  * against both the underlying vnode, and any
825  * copied vnode.  This ensures that no additional
826  * file permissions are given away simply because
827  * the user caused an implicit file copy.
828  */
829 static int
830 union_access(ap)
831 	struct vop_access_args /* {
832 		struct vnodeop_desc *a_desc;
833 		struct vnode *a_vp;
834 		int a_mode;
835 		struct ucred *a_cred;
836 		struct thread *a_td;
837 	} */ *ap;
838 {
839 	struct union_node *un = VTOUNION(ap->a_vp);
840 	struct thread *td = ap->a_td;
841 	int error = EACCES;
842 	struct vnode *vp;
843 
844 	/*
845 	 * Disallow write attempts on filesystems mounted read-only.
846 	 */
847 	if ((ap->a_mode & VWRITE) &&
848 	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
849 		switch (ap->a_vp->v_type) {
850 		case VREG:
851 		case VDIR:
852 		case VLNK:
853 			return (EROFS);
854 		default:
855 			break;
856 		}
857 	}
858 
859 	if ((vp = union_lock_upper(un, td)) != NULLVP) {
860 		ap->a_vp = vp;
861 		error = VCALL(vp, VOFFSET(vop_access), ap);
862 		union_unlock_upper(vp, td);
863 		return(error);
864 	}
865 
866 	if ((vp = un->un_lowervp) != NULLVP) {
867 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
868 		ap->a_vp = vp;
869 
870 		/*
871 		 * Remove VWRITE from a_mode if our mount point is RW, because
872 		 * we want to allow writes and lowervp may be read-only.
873 		 */
874 		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
875 			ap->a_mode &= ~VWRITE;
876 
877 		error = VCALL(vp, VOFFSET(vop_access), ap);
878 		if (error == 0) {
879 			struct union_mount *um;
880 
881 			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);
882 
883 			if (um->um_op == UNMNT_BELOW) {
884 				ap->a_cred = um->um_cred;
885 				error = VCALL(vp, VOFFSET(vop_access), ap);
886 			}
887 		}
888 		VOP_UNLOCK(vp, 0, td);
889 	}
890 	return(error);
891 }
892 
893 /*
894  * We handle getattr only to change the fsid and
895  * track object sizes
896  *
897  * It's not clear whether VOP_GETATTR is to be
898  * called with the vnode locked or not.  stat() calls
899  * it with (vp) locked, and fstat calls it with
900  * (vp) unlocked.
901  *
902  * Because of this we cannot use our normal locking functions
903  * if we do not intend to lock the main a_vp node.  At the moment
904  * we are running without any specific locking at all, but beware:
905  * any programmer must take care if locking is added
906  * to this function.
907  */
908 
909 static int
910 union_getattr(ap)
911 	struct vop_getattr_args /* {
912 		struct vnode *a_vp;
913 		struct vattr *a_vap;
914 		struct ucred *a_cred;
915 		struct thread *a_td;
916 	} */ *ap;
917 {
918 	int error;
919 	struct union_node *un = VTOUNION(ap->a_vp);
920 	struct vnode *vp;
921 	struct vattr *vap;
922 	struct vattr va;
923 
924 	/*
925 	 * Some programs walk the filesystem hierarchy by counting
926 	 * links to directories to avoid stat'ing all the time.
927 	 * This means the link count on directories needs to be "correct".
928 	 * The only way to do that is to call getattr on both layers
929 	 * and fix up the link count.  The link count will not necessarily
930 	 * be accurate but will be large enough to defeat the tree walkers.
931 	 */
932 
933 	vap = ap->a_vap;
934 
935 	if ((vp = un->un_uppervp) != NULLVP) {
936 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
937 		if (error)
938 			return (error);
939 		/* XXX isn't this dangerous without a lock? */
940 		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
941 	}
942 
943 	if (vp == NULLVP) {
944 		vp = un->un_lowervp;
945 	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
946 		vp = un->un_lowervp;
947 		vap = &va;
948 	} else {
949 		vp = NULLVP;
950 	}
951 
952 	if (vp != NULLVP) {
953 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
954 		if (error)
955 			return (error);
956 		/* XXX isn't this dangerous without a lock? */
957 		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
958 	}
959 
960 	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
961 		ap->a_vap->va_nlink += vap->va_nlink;
962 	return (0);
963 }
964 
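/*
 *	union_setattr:
 *
 *	Set attributes on the upper layer, first copying up a lower
 *	regular file if necessary (the open with O_TRUNC | O_CREAT
 *	case).  Fails with EROFS if there is no upper layer to modify.
 */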
965 static int
966 union_setattr(ap)
967 	struct vop_setattr_args /* {
968 		struct vnode *a_vp;
969 		struct vattr *a_vap;
970 		struct ucred *a_cred;
971 		struct thread *a_td;
972 	} */ *ap;
973 {
974 	struct union_node *un = VTOUNION(ap->a_vp);
975 	struct thread *td = ap->a_td;
976 	struct vattr *vap = ap->a_vap;
977 	struct vnode *uppervp;
978 	int error;
979 
980 	/*
981 	 * Disallow write attempts on filesystems mounted read-only.
982 	 */
983 	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
984 	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
985 	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
986 	     vap->va_mtime.tv_sec != VNOVAL ||
987 	     vap->va_mode != (mode_t)VNOVAL)) {
988 		return (EROFS);
989 	}
990 
991 	/*
992 	 * Handle case of truncating lower object to zero size,
993 	 * by creating a zero length upper object.  This is to
994 	 * handle the case of open with O_TRUNC and O_CREAT.
995 	 */
996 	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
997 		error = union_copyup(un, (ap->a_vap->va_size != 0),
998 			    ap->a_cred, ap->a_td);
999 		if (error)
1000 			return (error);
1001 	}
1002 
1003 	/*
1004 	 * Try to set attributes in upper layer,
1005 	 * otherwise return read-only filesystem error.
1006 	 */
1007 	error = EROFS;
1008 	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
1009 		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
1010 					ap->a_cred, ap->a_td);
1011 		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
1012 			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
1013 		union_unlock_upper(uppervp, td);
1014 	}
1015 	return (error);
1016 }
1017 
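/*
 *	union_read:
 *
 *	Read from whichever backing vnode exists (upper preferred), then
 *	update our cached size in case the underlying object grew
 *	beneath us.
 */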
1018 static int
1019 union_read(ap)
1020 	struct vop_read_args /* {
1021 		struct vnode *a_vp;
1022 		struct uio *a_uio;
1023 		int  a_ioflag;
1024 		struct ucred *a_cred;
1025 	} */ *ap;
1026 {
1027 	struct union_node *un = VTOUNION(ap->a_vp);
1028 	struct thread *td = ap->a_uio->uio_td;
1029 	struct vnode *uvp;
1030 	int error;
1031 
1032 	uvp = union_lock_other(un, td);
1033 	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
1034 
1035 	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1036 	union_unlock_other(uvp, td);
1037 
1038 	/*
1039 	 * XXX
1040 	 * perhaps the size of the underlying object has changed under
1041 	 * our feet.  take advantage of the offset information present
1042 	 * in the uio structure.
1043 	 */
1044 	if (error == 0) {
1045 		struct union_node *un = VTOUNION(ap->a_vp);
1046 		off_t cur = ap->a_uio->uio_offset;
1047 
1048 		if (uvp == un->un_uppervp) {
1049 			if (cur > un->un_uppersz)
1050 				union_newsize(ap->a_vp, cur, VNOVAL);
1051 		} else {
1052 			if (cur > un->un_lowersz)
1053 				union_newsize(ap->a_vp, VNOVAL, cur);
1054 		}
1055 	}
1056 	return (error);
1057 }
1058 
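/*
 *	union_write:
 *
 *	Writes always go to the upper vnode; union_open() and
 *	union_copyup() are responsible for ensuring one exists, so a
 *	missing upper layer here is a panic.  (The handler is declared
 *	with struct vop_read_args, presumably because the read and write
 *	argument structures share a layout.)
 */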
1059 static int
1060 union_write(ap)
1061 	struct vop_read_args /* {
1062 		struct vnode *a_vp;
1063 		struct uio *a_uio;
1064 		int  a_ioflag;
1065 		struct ucred *a_cred;
1066 	} */ *ap;
1067 {
1068 	struct union_node *un = VTOUNION(ap->a_vp);
1069 	struct thread *td = ap->a_uio->uio_td;
1070 	struct vnode *uppervp;
1071 	int error;
1072 
1073 	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
1074 		panic("union: missing upper layer in write");
1075 
1076 	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1077 
1078 	/*
1079 	 * the size of the underlying object may be changed by the
1080 	 * write.
1081 	 */
1082 	if (error == 0) {
1083 		off_t cur = ap->a_uio->uio_offset;
1084 
1085 		if (cur > un->un_uppersz)
1086 			union_newsize(ap->a_vp, cur, VNOVAL);
1087 	}
1088 	union_unlock_upper(uppervp, td);
1089 	return (error);
1090 }
1091 
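/*
 *	union_lease/union_ioctl/union_poll:
 *
 *	Simple pass-throughs: substitute the backing vnode (OTHERVP)
 *	for a_vp and re-dispatch the operation against it.
 */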
1092 static int
1093 union_lease(ap)
1094 	struct vop_lease_args /* {
1095 		struct vnode *a_vp;
1096 		struct thread *a_td;
1097 		struct ucred *a_cred;
1098 		int a_flag;
1099 	} */ *ap;
1100 {
1101 	struct vnode *ovp = OTHERVP(ap->a_vp);
1102 
1103 	ap->a_vp = ovp;
1104 	return (VCALL(ovp, VOFFSET(vop_lease), ap));
1105 }
1106 
1107 static int
1108 union_ioctl(ap)
1109 	struct vop_ioctl_args /* {
1110 		struct vnode *a_vp;
1111 		int  a_command;
1112 		caddr_t  a_data;
1113 		int  a_fflag;
1114 		struct ucred *a_cred;
1115 		struct thread *a_td;
1116 	} */ *ap;
1117 {
1118 	struct vnode *ovp = OTHERVP(ap->a_vp);
1119 
1120 	ap->a_vp = ovp;
1121 	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1122 }
1123 
1124 static int
1125 union_poll(ap)
1126 	struct vop_poll_args /* {
1127 		struct vnode *a_vp;
1128 		int  a_events;
1129 		struct ucred *a_cred;
1130 		struct thread *a_td;
1131 	} */ *ap;
1132 {
1133 	struct vnode *ovp = OTHERVP(ap->a_vp);
1134 
1135 	ap->a_vp = ovp;
1136 	return (VCALL(ovp, VOFFSET(vop_poll), ap));
1137 }
1138 
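/*
 *	union_revoke:
 *
 *	Revoke both backing vnodes, if present, and then vgone()
 *	ourselves.
 */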
1139 static int
1140 union_revoke(ap)
1141 	struct vop_revoke_args /* {
1142 		struct vnode *a_vp;
1143 		int a_flags;
1144 		struct thread *a_td;
1145 	} */ *ap;
1146 {
1147 	struct vnode *vp = ap->a_vp;
1148 
1149 	if (UPPERVP(vp))
1150 		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1151 	if (LOWERVP(vp))
1152 		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1153 	vgone(vp);
1154 	return (0);
1155 }
1156 
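/*
 *	union_fsync:
 *
 *	Sync whichever backing vnode exists (upper preferred); a union
 *	node with no backing store has nothing to sync.
 */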
1157 static int
1158 union_fsync(ap)
1159 	struct vop_fsync_args /* {
1160 		struct vnode *a_vp;
1161 		struct ucred *a_cred;
1162 		int  a_waitfor;
1163 		struct thread *a_td;
1164 	} */ *ap;
1165 {
1166 	int error = 0;
1167 	struct thread *td = ap->a_td;
1168 	struct vnode *targetvp;
1169 	struct union_node *un = VTOUNION(ap->a_vp);
1170 
1171 	if ((targetvp = union_lock_other(un, td)) != NULLVP) {
1172 		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, td);
1173 		union_unlock_other(targetvp, td);
1174 	}
1175 
1176 	return (error);
1177 }
1178 
1179 /*
1180  *	union_remove:
1181  *
1182  *	Remove the specified cnp.  The dvp and vp are passed to us locked
1183  *	and must remain locked on return.
1184  */
1185 
1186 static int
1187 union_remove(ap)
1188 	struct vop_remove_args /* {
1189 		struct vnode *a_dvp;
1190 		struct vnode *a_vp;
1191 		struct componentname *a_cnp;
1192 	} */ *ap;
1193 {
1194 	struct union_node *dun = VTOUNION(ap->a_dvp);
1195 	struct union_node *un = VTOUNION(ap->a_vp);
1196 	struct componentname *cnp = ap->a_cnp;
1197 	struct thread *td = cnp->cn_thread;
1198 	struct vnode *uppervp;
1199 	struct vnode *upperdvp;
1200 	int error;
1201 
1202 	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
1203 		panic("union remove: null upper vnode");
1204 
1205 	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
1206 		if (union_dowhiteout(un, cnp->cn_cred, td))
1207 			cnp->cn_flags |= DOWHITEOUT;
1208 		error = VOP_REMOVE(upperdvp, uppervp, cnp);
1209 #if 0
1210 		/* XXX */
1211 		if (!error)
1212 			union_removed_upper(un);
1213 #endif
1214 		union_unlock_upper(uppervp, td);
1215 	} else {
1216 		error = union_mkwhiteout(
1217 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1218 			    upperdvp, ap->a_cnp, un->un_path);
1219 	}
1220 	union_unlock_upper(upperdvp, td);
1221 	return (error);
1222 }
1223 
1224 /*
1225  *	union_link:
1226  *
1227  *	tdvp will be locked on entry, vp will not be locked on entry.
1228  *	tdvp should remain locked on return and vp should remain unlocked
1229  *	on return.
1230  */
1231 
1232 static int
1233 union_link(ap)
1234 	struct vop_link_args /* {
1235 		struct vnode *a_tdvp;
1236 		struct vnode *a_vp;
1237 		struct componentname *a_cnp;
1238 	} */ *ap;
1239 {
1240 	struct componentname *cnp = ap->a_cnp;
1241 	struct thread *td = cnp->cn_thread;
1242 	struct union_node *dun = VTOUNION(ap->a_tdvp);
1243 	struct vnode *vp;
1244 	struct vnode *tdvp;
1245 	int error = 0;
1246 
1247 	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
1248 		vp = ap->a_vp;
1249 	} else {
1250 		struct union_node *tun = VTOUNION(ap->a_vp);
1251 
1252 		if (tun->un_uppervp == NULLVP) {
1253 			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
1254 #if 0
1255 			if (dun->un_uppervp == tun->un_dirvp) {
1256 				if (dun->un_flags & UN_ULOCK) {
1257 					dun->un_flags &= ~UN_ULOCK;
1258 					VOP_UNLOCK(dun->un_uppervp, 0, td);
1259 				}
1260 			}
1261 #endif
1262 			error = union_copyup(tun, 1, cnp->cn_cred, td);
1263 #if 0
1264 			if (dun->un_uppervp == tun->un_dirvp) {
1265 				vn_lock(dun->un_uppervp,
1266 					    LK_EXCLUSIVE | LK_RETRY, td);
1267 				dun->un_flags |= UN_ULOCK;
1268 			}
1269 #endif
1270 			VOP_UNLOCK(ap->a_vp, 0, td);
1271 		}
1272 		vp = tun->un_uppervp;
1273 	}
1274 
1275 	if (error)
1276 		return (error);
1277 
1278 	/*
1279 	 * Make sure upper is locked, then unlock the union directory we were
1280 	 * called with to avoid a deadlock while we are calling VOP_LINK on
1281 	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
1282 	 * is expected to be locked on return.
1283 	 */
1284 
1285 	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
1286 		return (EROFS);
1287 
1288 	VOP_UNLOCK(ap->a_tdvp, 0, td);		/* unlock calling node */
1289 	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */
1290 
1291 	/*
1292 	 * We have to unlock tdvp prior to relocking our calling node in
1293 	 * order to avoid a deadlock.
1294 	 */
1295 	union_unlock_upper(tdvp, td);
1296 	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, td);
1297 	return (error);
1298 }
1299 
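/*
 *	union_rename:
 *
 *	Substitute the upper-layer vnodes for the union vnodes we were
 *	handed, copying a file up or failing with EXDEV where the upper
 *	layer is missing, then pass the whole operation to VOP_RENAME()
 *	on the upper layer.  VOP_RENAME() consumes its references, so on
 *	that path there is no cleanup left to do here.
 */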
1300 static int
1301 union_rename(ap)
1302 	struct vop_rename_args  /* {
1303 		struct vnode *a_fdvp;
1304 		struct vnode *a_fvp;
1305 		struct componentname *a_fcnp;
1306 		struct vnode *a_tdvp;
1307 		struct vnode *a_tvp;
1308 		struct componentname *a_tcnp;
1309 	} */ *ap;
1310 {
1311 	int error;
1312 	struct vnode *fdvp = ap->a_fdvp;
1313 	struct vnode *fvp = ap->a_fvp;
1314 	struct vnode *tdvp = ap->a_tdvp;
1315 	struct vnode *tvp = ap->a_tvp;
1316 
1317 	/*
1318 	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
1319 	 * replace the fdvp, release the original one and ref the new one.
1320 	 */
1321 
1322 	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
1323 		struct union_node *un = VTOUNION(fdvp);
1324 		if (un->un_uppervp == NULLVP) {
1325 			/*
1326 			 * this should never happen in normal
1327 			 * operation but might if there was
1328 			 * a problem creating the top-level shadow
1329 			 * directory.
1330 			 */
1331 			error = EXDEV;
1332 			goto bad;
1333 		}
1334 		fdvp = un->un_uppervp;
1335 		VREF(fdvp);
1336 		vrele(ap->a_fdvp);
1337 	}
1338 
1339 	/*
1340 	 * Figure out what fvp to pass to our upper or lower vnode.  If we
1341 	 * replace the fvp, release the original one and ref the new one.
1342 	 */
1343 
1344 	if (fvp->v_op == union_vnodeop_p) {	/* always true */
1345 		struct union_node *un = VTOUNION(fvp);
1346 #if 0
1347 		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
1348 #endif
1349 
1350 		if (un->un_uppervp == NULLVP) {
1351 			switch(fvp->v_type) {
1352 			case VREG:
1353 				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
1354 				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_thread);
1355 				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_thread);
1356 				if (error)
1357 					goto bad;
1358 				break;
1359 			case VDIR:
1360 				/*
1361 				 * XXX not yet.
1362 				 *
1363 				 * There is only one way to rename a directory
1364 				 * based in the lowervp, and that is to copy
1365 				 * the entire directory hierarchy.  Otherwise
1366 				 * it would not last across a reboot.
1367 				 */
1368 #if 0
1369 				vrele(fvp);
1370 				fvp = NULL;
1371 				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
1372 				error = union_mkshadow(um, fdvp,
1373 					    ap->a_fcnp, &un->un_uppervp);
1374 				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_thread);
1375 				if (un->un_uppervp)
1376 					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_thread);
1377 				if (error)
1378 					goto bad;
1379 				break;
1380 #endif
1381 			default:
1382 				error = EXDEV;
1383 				goto bad;
1384 			}
1385 		}
1386 
1387 		if (un->un_lowervp != NULLVP)
1388 			ap->a_fcnp->cn_flags |= DOWHITEOUT;
1389 		fvp = un->un_uppervp;
1390 		VREF(fvp);
1391 		vrele(ap->a_fvp);
1392 	}
1393 
1394 	/*
1395 	 * Figure out what tdvp (destination directory) to pass to the
1396 	 * lower level.  If we replace it with uppervp, we need to vput the
1397 	 * old one.  The exclusive lock is transferred to what we will pass
1398 	 * down in the VOP_RENAME and we replace uppervp with a simple
1399 	 * reference.
1400 	 */
1401 
1402 	if (tdvp->v_op == union_vnodeop_p) {
1403 		struct union_node *un = VTOUNION(tdvp);
1404 
1405 		if (un->un_uppervp == NULLVP) {
1406 			/*
1407 			 * this should never happen in normal
1408 			 * operation but might if there was
1409 			 * a problem creating the top-level shadow
1410 			 * directory.
1411 			 */
1412 			error = EXDEV;
1413 			goto bad;
1414 		}
1415 
1416 		/*
1417 		 * new tdvp is a lock and reference on uppervp, put away
1418 		 * the old tdvp.
1419 		 */
1420 		tdvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
1421 		vput(ap->a_tdvp);
1422 	}
1423 
1424 	/*
1425 	 * Figure out what tvp (destination file) to pass to the
1426 	 * lower level.
1427 	 *
1428 	 * If the uppervp file does not exist put away the (wrong)
1429 	 * file and change tvp to NULL.
1430 	 */
1431 
1432 	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
1433 		struct union_node *un = VTOUNION(tvp);
1434 
1435 		tvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
1436 		vput(ap->a_tvp);
1437 		/* note: tvp may be NULL */
1438 	}
1439 
1440 	/*
1441 	 * VOP_RENAME releases/vputs prior to returning, so we have no
1442 	 * cleanup to do.
1443 	 */
1444 
1445 	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));
1446 
1447 	/*
1448 	 * Error.  We still have to release / vput the various elements.
1449 	 */
1450 
1451 bad:
1452 	vrele(fdvp);
1453 	if (fvp)
1454 		vrele(fvp);
1455 	vput(tdvp);
1456 	if (tvp != NULLVP) {
1457 		if (tvp != tdvp)
1458 			vput(tvp);
1459 		else
1460 			vrele(tvp);
1461 	}
1462 	return (error);
1463 }
1464 
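/*
 *	union_mkdir:
 *
 *	a_dvp is locked on entry and remains locked on return.  The new
 *	directory is created in the upper layer and returned via a_vpp
 *	wrapped in a union node.
 */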
1465 static int
1466 union_mkdir(ap)
1467 	struct vop_mkdir_args /* {
1468 		struct vnode *a_dvp;
1469 		struct vnode **a_vpp;
1470 		struct componentname *a_cnp;
1471 		struct vattr *a_vap;
1472 	} */ *ap;
1473 {
1474 	struct union_node *dun = VTOUNION(ap->a_dvp);
1475 	struct componentname *cnp = ap->a_cnp;
1476 	struct thread *td = cnp->cn_thread;
1477 	struct vnode *upperdvp;
1478 	int error = EROFS;
1479 
1480 	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
1481 		struct vnode *vp;
1482 
1483 		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
1484 		union_unlock_upper(upperdvp, td);
1485 
1486 		if (error == 0) {
1487 			VOP_UNLOCK(vp, 0, td);
1488 			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_usecount));
1489 			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
1490 				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
1491 			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
1492 		}
1493 	}
1494 	return (error);
1495 }
1496 
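/*
 *	union_rmdir:
 *
 *	Remove the directory from the upper layer, whiting it out if a
 *	lower version would otherwise show through; if the directory
 *	exists only in the lower layer, just create the whiteout.
 */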
1497 static int
1498 union_rmdir(ap)
1499 	struct vop_rmdir_args /* {
1500 		struct vnode *a_dvp;
1501 		struct vnode *a_vp;
1502 		struct componentname *a_cnp;
1503 	} */ *ap;
1504 {
1505 	struct union_node *dun = VTOUNION(ap->a_dvp);
1506 	struct union_node *un = VTOUNION(ap->a_vp);
1507 	struct componentname *cnp = ap->a_cnp;
1508 	struct thread *td = cnp->cn_thread;
1509 	struct vnode *upperdvp;
1510 	struct vnode *uppervp;
1511 	int error;
1512 
1513 	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
1514 		panic("union rmdir: null upper vnode");
1515 
1516 	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
1517 		if (union_dowhiteout(un, cnp->cn_cred, td))
1518 			cnp->cn_flags |= DOWHITEOUT;
1519 		error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
1520 		union_unlock_upper(uppervp, td);
1521 	} else {
1522 		error = union_mkwhiteout(
1523 			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1524 			    dun->un_uppervp, ap->a_cnp, un->un_path);
1525 	}
1526 	union_unlock_upper(upperdvp, td);
1527 	return (error);
1528 }
1529 
1530 /*
1531  *	union_symlink:
1532  *
1533  *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
1534  *	(unused).
1535  */
1536 
1537 static int
1538 union_symlink(ap)
1539 	struct vop_symlink_args /* {
1540 		struct vnode *a_dvp;
1541 		struct vnode **a_vpp;
1542 		struct componentname *a_cnp;
1543 		struct vattr *a_vap;
1544 		char *a_target;
1545 	} */ *ap;
1546 {
1547 	struct union_node *dun = VTOUNION(ap->a_dvp);
1548 	struct componentname *cnp = ap->a_cnp;
1549 	struct thread *td = cnp->cn_thread;
1550 	struct vnode *dvp;
1551 	int error = EROFS;
1552 
1553 	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
1554 		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1555 			    ap->a_target);
1556 		union_unlock_upper(dvp, td);
1557 	}
1558 	return (error);
1559 }
1560 
1561 /*
1562  * union_readdir works in concert with getdirentries and
1563  * readdir(3) to provide a list of entries in the unioned
1564  * directories.  getdirentries is responsible for walking
1565  * down the union stack.  readdir(3) is responsible for
1566  * eliminating duplicate names from the returned data stream.
1567  */
1568 static int
1569 union_readdir(ap)
1570 	struct vop_readdir_args /* {
1571 		struct vnode *a_vp;
1572 		struct uio *a_uio;
1573 		struct ucred *a_cred;
1574 		int *a_eofflag;
1575 		u_long *a_cookies;
1576 		int a_ncookies;
1577 	} */ *ap;
1578 {
1579 	struct union_node *un = VTOUNION(ap->a_vp);
1580 	struct thread *td = ap->a_uio->uio_td;
1581 	struct vnode *uvp;
1582 	int error = 0;
1583 
1584 	if ((uvp = union_lock_upper(un, td)) != NULLVP) {
1585 		ap->a_vp = uvp;
1586 		error = VCALL(uvp, VOFFSET(vop_readdir), ap);
1587 		union_unlock_upper(uvp, td);
1588 	}
1589 	return(error);
1590 }
1591 
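/*
 *	union_readlink:
 *
 *	Forward the readlink to whichever backing vnode exists (upper
 *	preferred).
 */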
1592 static int
1593 union_readlink(ap)
1594 	struct vop_readlink_args /* {
1595 		struct vnode *a_vp;
1596 		struct uio *a_uio;
1597 		struct ucred *a_cred;
1598 	} */ *ap;
1599 {
1600 	int error;
1601 	struct union_node *un = VTOUNION(ap->a_vp);
1602 	struct uio *uio = ap->a_uio;
1603 	struct thread *td = uio->uio_td;
1604 	struct vnode *vp;
1605 
1606 	vp = union_lock_other(un, td);
1607 	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1608 
1609 	ap->a_vp = vp;
1610 	error = VCALL(vp, VOFFSET(vop_readlink), ap);
1611 	union_unlock_other(vp, td);
1612 
1613 	return (error);
1614 }
1615 
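/*
 *	union_getwritemount:
 *
 *	Writes are only satisfied by the upper layer, so report the
 *	upper vnode's write mount.  With no upper vnode we return
 *	EACCES, or EOPNOTSUPP if the vnode is already on the free list.
 */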
1616 static int
1617 union_getwritemount(ap)
1618 	struct vop_getwritemount_args /* {
1619 		struct vnode *a_vp;
1620 		struct mount **a_mpp;
1621 	} */ *ap;
1622 {
1623 	struct vnode *vp = ap->a_vp;
1624 	struct vnode *uvp = UPPERVP(vp);
1625 
1626 	if (uvp == NULL) {
1627 		VI_LOCK(vp);
1628 		if (vp->v_flag & VFREE) {
1629 			VI_UNLOCK(vp);
1630 			return (EOPNOTSUPP);
1631 		}
1632 		VI_UNLOCK(vp);
1633 		return (EACCES);
1634 	}
1635 	return(VOP_GETWRITEMOUNT(uvp, ap->a_mpp));
1636 }
1637 
1638 /*
1639  *	union_inactive:
1640  *
1641  *	Called with the vnode locked.  We are expected to unlock the vnode.
1642  */
1643 
1644 static int
1645 union_inactive(ap)
1646 	struct vop_inactive_args /* {
1647 		struct vnode *a_vp;
1648 		struct thread *a_td;
1649 	} */ *ap;
1650 {
1651 	struct vnode *vp = ap->a_vp;
1652 	struct thread *td = ap->a_td;
1653 	struct union_node *un = VTOUNION(vp);
1654 	struct vnode **vpp;
1655 
1656 	/*
1657 	 * Do nothing (and _don't_ bypass).
1658 	 * Wait to vrele lowervp until reclaim,
1659 	 * so that until then our union_node is in the
1660 	 * cache and reusable.
1661 	 *
1662 	 * NEEDSWORK: Someday, consider inactive'ing
1663 	 * the lowervp and then trying to reactivate it
1664 	 * with capabilities (v_id)
1665 	 * like they do in the name lookup cache code.
1666 	 * That's too much work for now.
1667 	 */
1668 
1669 	if (un->un_dircache != 0) {
1670 		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1671 			vrele(*vpp);
1672 		free (un->un_dircache, M_TEMP);
1673 		un->un_dircache = 0;
1674 	}
1675 
1676 #if 0
1677 	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
1678 		un->un_flags &= ~UN_ULOCK;
1679 		VOP_UNLOCK(un->un_uppervp, 0, td);
1680 	}
1681 #endif
1682 
1683 	VOP_UNLOCK(vp, 0, td);
1684 
1685 	if ((un->un_flags & UN_CACHED) == 0)
1686 		vgone(vp);
1687 
1688 	return (0);
1689 }
1690 
1691 static int
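/*
 *	union_reclaim:
 *
 *	Tear down the union node, releasing its backing vnodes.
 */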
1692 union_reclaim(ap)
1693 	struct vop_reclaim_args /* {
1694 		struct vnode *a_vp;
1695 	} */ *ap;
1696 {
1697 	union_freevp(ap->a_vp);
1698 
1699 	return (0);
1700 }
1701 
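/*
 *	union_lock:
 *
 *	Locking is delegated to vop_stdlock(); the disabled code below
 *	is the historical scheme that also acquired the uppervp lock.
 */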
1702 static int
1703 union_lock(ap)
1704 	struct vop_lock_args *ap;
1705 {
1706 #if 0
1707 	struct vnode *vp = ap->a_vp;
1708 	struct thread *td = ap->a_td;
1709 	int flags = ap->a_flags;
1710 	struct union_node *un;
1711 #endif
1712 	int error;
1713 
1714 	error = vop_stdlock(ap);
1715 #if 0
1716 	un = VTOUNION(vp);
1717 
1718 	if (error == 0) {
1719 		/*
1720 		 * Lock the upper if it exists and this is an exclusive lock
1721 		 * request.
1722 		 */
1723 		if (un->un_uppervp != NULLVP &&
1724 		    (flags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
1725 			if ((un->un_flags & UN_ULOCK) == 0 && vp->v_usecount) {
1726 				error = vn_lock(un->un_uppervp, flags, td);
1727 				if (error) {
1728 					struct vop_unlock_args uap = { 0 };
1729 					uap.a_vp = ap->a_vp;
1730 					uap.a_flags = ap->a_flags;
1731 					uap.a_td = ap->a_td;
1732 					vop_stdunlock(&uap);
1733 					return (error);
1734 				}
1735 				un->un_flags |= UN_ULOCK;
1736 			}
1737 		}
1738 	}
1739 #endif
1740 	return (error);
1741 }
1742 
1743 /*
1744  *	union_unlock:
1745  *
1746  *	Unlock our union node.  (The historical uppervp unlocking below is disabled.)
1747  */
1748 static int
1749 union_unlock(ap)
1750 	struct vop_unlock_args /* {
1751 		struct vnode *a_vp;
1752 		int a_flags;
1753 		struct thread *a_td;
1754 	} */ *ap;
1755 {
1756 #if 0
1757 	struct union_node *un = VTOUNION(ap->a_vp);
1758 #endif
1759 	int error;
1760 
1761 #if 0
1762 	KASSERT((un->un_uppervp == NULL || un->un_uppervp->v_usecount > 0), ("uppervp usecount is 0"));
1763 #endif
1764 
1765 	error = vop_stdunlock(ap);
1766 #if 0
1767 
1768 	/*
1769 	 * If no exclusive locks remain and we are holding an uppervp lock,
1770 	 * remove the uppervp lock.
1771 	 */
1772 
1773 	if ((un->un_flags & UN_ULOCK) &&
1774 	    lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
1775 		un->un_flags &= ~UN_ULOCK;
1776 		VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, td);
1777 	}
1778 #endif
1779 	return(error);
1780 }
1781 
1782 /*
1783  * A union vnode does not hold a VM object; there is no need to create
1784  * one for the upper or lower vp because that is done in union_open().
1785  */
1786 static int
1787 union_createvobject(ap)
1788 	struct vop_createvobject_args /* {
1789 		struct vnode *vp;
1790 		struct ucred *cred;
1791 		struct thread *td;
1792 	} */ *ap;
1793 {
1794 	struct vnode *vp = ap->a_vp;
1795 
1796 	vp->v_flag |= VOBJBUF;
1797 	return (0);
1798 }
1799 
1800 /*
1801  * We have nothing to destroy and this operation shouldn't be bypassed.
1802  */
1803 static int
1804 union_destroyvobject(ap)
1805 	struct vop_destroyvobject_args /* {
1806 		struct vnode *vp;
1807 	} */ *ap;
1808 {
1809 	struct vnode *vp = ap->a_vp;
1810 
1811 	vp->v_flag &= ~VOBJBUF;
1812 	return (0);
1813 }
1814 
1815 /*
1816  * Get VM object from the upper or lower vp
1817  */
1818 static int
1819 union_getvobject(ap)
1820 	struct vop_getvobject_args /* {
1821 		struct vnode *vp;
1822 		struct vm_object **objpp;
1823 	} */ *ap;
1824 {
1825 	struct vnode *ovp = OTHERVP(ap->a_vp);
1826 
1827 	if (ovp == NULL)
1828 		return EINVAL;
1829 	return (VOP_GETVOBJECT(ovp, ap->a_objpp));
1830 }
1831 
1832 static int
1833 union_print(ap)
1834 	struct vop_print_args /* {
1835 		struct vnode *a_vp;
1836 	} */ *ap;
1837 {
1838 	struct vnode *vp = ap->a_vp;
1839 
1840 	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1841 			vp, UPPERVP(vp), LOWERVP(vp));
1842 	if (UPPERVP(vp) != NULLVP)
1843 		vprint("union: upper", UPPERVP(vp));
1844 	if (LOWERVP(vp) != NULLVP)
1845 		vprint("union: lower", LOWERVP(vp));
1846 
1847 	return (0);
1848 }
1849 
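/*
 *	union_pathconf:
 *
 *	Forward the pathconf query to whichever backing vnode exists
 *	(upper preferred).
 */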
1850 static int
1851 union_pathconf(ap)
1852 	struct vop_pathconf_args /* {
1853 		struct vnode *a_vp;
1854 		int a_name;
1855 		int *a_retval;
1856 	} */ *ap;
1857 {
1858 	int error;
1859 	struct thread *td = curthread;		/* XXX */
1860 	struct union_node *un = VTOUNION(ap->a_vp);
1861 	struct vnode *vp;
1862 
1863 	vp = union_lock_other(un, td);
1864 	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1865 
1866 	ap->a_vp = vp;
1867 	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1868 	union_unlock_other(vp, td);
1869 
1870 	return (error);
1871 }
1872 
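/*
 *	union_advlock:
 *
 *	Apply advisory locking against the backing vnode (OTHERVP).
 */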
1873 static int
1874 union_advlock(ap)
1875 	struct vop_advlock_args /* {
1876 		struct vnode *a_vp;
1877 		caddr_t  a_id;
1878 		int  a_op;
1879 		struct flock *a_fl;
1880 		int  a_flags;
1881 	} */ *ap;
1882 {
1883 	register struct vnode *ovp = OTHERVP(ap->a_vp);
1884 
1885 	ap->a_vp = ovp;
1886 	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1887 }
1888 
1889 
1890 /*
1891  * XXX - vop_strategy must be hand coded because it has no
1892  * vnode in its arguments.
1893  * YYY - and it is not coherent with anything.
1894  *
1895  * This goes away with a merged VM/buffer cache.
1896  */
1897 static int
1898 union_strategy(ap)
1899 	struct vop_strategy_args /* {
1900 		struct vnode *a_vp;
1901 		struct buf *a_bp;
1902 	} */ *ap;
1903 {
1904 	struct buf *bp = ap->a_bp;
1905 	struct vnode *othervp = OTHERVP(bp->b_vp);
1906 
1907 #ifdef DIAGNOSTIC
1908 	if (othervp == NULLVP)
1909 		panic("union_strategy: nil vp");
1910 	if ((bp->b_iocmd == BIO_WRITE) &&
1911 	    (othervp == LOWERVP(bp->b_vp)))
1912 		panic("union_strategy: writing to lowervp");
1913 #endif
1914 	return (VOP_STRATEGY(othervp, bp));
1915 }
1916 
1917 /*
1918  * Global vfs data structures
1919  */
1920 vop_t **union_vnodeop_p;
1921 static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
1922 	{ &vop_default_desc,		(vop_t *) vop_defaultop },
1923 	{ &vop_access_desc,		(vop_t *) union_access },
1924 	{ &vop_advlock_desc,		(vop_t *) union_advlock },
1925 	{ &vop_bmap_desc,		(vop_t *) vop_eopnotsupp },
1926 	{ &vop_close_desc,		(vop_t *) union_close },
1927 	{ &vop_create_desc,		(vop_t *) union_create },
1928 	{ &vop_createvobject_desc,	(vop_t *) union_createvobject },
1929 	{ &vop_destroyvobject_desc,	(vop_t *) union_destroyvobject },
1930 	{ &vop_fsync_desc,		(vop_t *) union_fsync },
1931 	{ &vop_getattr_desc,		(vop_t *) union_getattr },
1932 	{ &vop_getvobject_desc,		(vop_t *) union_getvobject },
1933 	{ &vop_inactive_desc,		(vop_t *) union_inactive },
1934 	{ &vop_ioctl_desc,		(vop_t *) union_ioctl },
1935 	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
1936 	{ &vop_lease_desc,		(vop_t *) union_lease },
1937 	{ &vop_link_desc,		(vop_t *) union_link },
1938 	{ &vop_lock_desc,		(vop_t *) union_lock },
1939 	{ &vop_lookup_desc,		(vop_t *) union_lookup },
1940 	{ &vop_mkdir_desc,		(vop_t *) union_mkdir },
1941 	{ &vop_mknod_desc,		(vop_t *) union_mknod },
1942 	{ &vop_open_desc,		(vop_t *) union_open },
1943 	{ &vop_pathconf_desc,		(vop_t *) union_pathconf },
1944 	{ &vop_poll_desc,		(vop_t *) union_poll },
1945 	{ &vop_print_desc,		(vop_t *) union_print },
1946 	{ &vop_read_desc,		(vop_t *) union_read },
1947 	{ &vop_readdir_desc,		(vop_t *) union_readdir },
1948 	{ &vop_readlink_desc,		(vop_t *) union_readlink },
1949 	{ &vop_getwritemount_desc,	(vop_t *) union_getwritemount },
1950 	{ &vop_reclaim_desc,		(vop_t *) union_reclaim },
1951 	{ &vop_remove_desc,		(vop_t *) union_remove },
1952 	{ &vop_rename_desc,		(vop_t *) union_rename },
1953 	{ &vop_revoke_desc,		(vop_t *) union_revoke },
1954 	{ &vop_rmdir_desc,		(vop_t *) union_rmdir },
1955 	{ &vop_setattr_desc,		(vop_t *) union_setattr },
1956 	{ &vop_strategy_desc,		(vop_t *) union_strategy },
1957 	{ &vop_symlink_desc,		(vop_t *) union_symlink },
1958 	{ &vop_unlock_desc,		(vop_t *) union_unlock },
1959 	{ &vop_whiteout_desc,		(vop_t *) union_whiteout },
1960 	{ &vop_write_desc,		(vop_t *) union_write },
1961 	{ NULL, NULL }
1962 };
1963 static struct vnodeopv_desc union_vnodeop_opv_desc =
1964 	{ &union_vnodeop_p, union_vnodeop_entries };
1965 
1966 VNODEOP_SET(union_vnodeop_opv_desc);
1967