xref: /freebsd/sys/fs/unionfs/union_vnops.c (revision 8847579c57d6aff2b3371c707dce7a2cee8389aa)
1 /*-
2  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3  * Copyright (c) 1992, 1993, 1994, 1995
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
34  * $FreeBSD$
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/fcntl.h>
40 #include <sys/stat.h>
41 #include <sys/kernel.h>
42 #include <sys/vnode.h>
43 #include <sys/mount.h>
44 #include <sys/namei.h>
45 #include <sys/malloc.h>
46 #include <sys/bio.h>
47 #include <sys/buf.h>
48 #include <sys/lock.h>
49 #include <sys/sysctl.h>
50 #include <sys/unistd.h>
51 #include <sys/acl.h>
52 #include <sys/event.h>
53 #include <sys/extattr.h>
54 #include <fs/unionfs/union.h>
55 
56 #include <vm/vm.h>
57 #include <vm/vnode_pager.h>
58 
59 #include <vm/vm_page.h>
60 #include <vm/vm_object.h>
61 
/*
 * Verbosity knob consumed by the UDEBUG() macro; nonzero enables
 * debug printing in this file.
 */
int uniondebug = 0;

#if UDEBUG_ENABLED
/* Debug output compiled in: allow vfs.uniondebug to be toggled at run time. */
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
#else
/* Debug output compiled out: expose the sysctl read-only. */
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
#endif
69 
70 static vop_access_t	union_access;
71 static vop_aclcheck_t	union_aclcheck;
72 static vop_advlock_t	union_advlock;
73 static vop_close_t	union_close;
74 static vop_closeextattr_t	union_closeextattr;
75 static vop_create_t	union_create;
76 static vop_deleteextattr_t	union_deleteextattr;
77 static vop_fsync_t	union_fsync;
78 static vop_getacl_t	union_getacl;
79 static vop_getattr_t	union_getattr;
80 static vop_getextattr_t	union_getextattr;
81 static vop_inactive_t	union_inactive;
82 static vop_ioctl_t	union_ioctl;
83 static vop_lease_t	union_lease;
84 static vop_link_t	union_link;
85 static vop_listextattr_t	union_listextattr;
86 static vop_lookup_t	union_lookup;
87 static int	union_lookup1(struct vnode *udvp, struct vnode **dvp,
88 				   struct vnode **vpp,
89 				   struct componentname *cnp);
90 static vop_mkdir_t	union_mkdir;
91 static vop_mknod_t	union_mknod;
92 static vop_open_t	union_open;
93 static vop_openextattr_t	union_openextattr;
94 static vop_pathconf_t	union_pathconf;
95 static vop_print_t	union_print;
96 static vop_read_t	union_read;
97 static vop_readdir_t	union_readdir;
98 static vop_readlink_t	union_readlink;
99 static vop_getwritemount_t	union_getwritemount;
100 static vop_reclaim_t	union_reclaim;
101 static vop_remove_t	union_remove;
102 static vop_rename_t	union_rename;
103 static vop_rmdir_t	union_rmdir;
104 static vop_poll_t	union_poll;
105 static vop_setacl_t	union_setacl;
106 static vop_setattr_t	union_setattr;
107 static vop_setlabel_t	union_setlabel;
108 static vop_setextattr_t	union_setextattr;
109 static vop_strategy_t	union_strategy;
110 static vop_symlink_t	union_symlink;
111 static vop_whiteout_t	union_whiteout;
112 static vop_write_t	union_write;
113 
114 static __inline
115 struct vnode *
116 union_lock_upper(struct union_node *un, struct thread *td)
117 {
118 	struct vnode *uppervp;
119 
120 	if ((uppervp = un->un_uppervp) != NULL) {
121 		VREF(uppervp);
122 		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
123 	}
124 	KASSERT((uppervp == NULL || vrefcnt(uppervp) > 0), ("uppervp usecount is 0"));
125 	return(uppervp);
126 }
127 
/*
 * Drop the lock and the reference acquired by union_lock_upper().
 */
static __inline
void
union_unlock_upper(struct vnode *uppervp, struct thread *td)
{
	vput(uppervp);
}
134 
135 static __inline
136 struct vnode *
137 union_lock_other(struct union_node *un, struct thread *td)
138 {
139 	struct vnode *vp;
140 
141 	if (un->un_uppervp != NULL) {
142 		vp = union_lock_upper(un, td);
143 	} else if ((vp = un->un_lowervp) != NULL) {
144 		VREF(vp);
145 		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
146 	}
147 	return(vp);
148 }
149 
/*
 * Drop the lock and the reference acquired by union_lock_other().
 */
static __inline
void
union_unlock_other(struct vnode *vp, struct thread *td)
{
	vput(vp);
}
156 
157 /*
158  *	union_lookup:
159  *
160  *	udvp	must be exclusively locked on call and will remain
161  *		exclusively locked on return.  This is the mount point
162  *		for our filesystem.
163  *
164  *	dvp	Our base directory, locked and referenced.
165  *		The passed dvp will be dereferenced and unlocked on return
166  *		and a new dvp will be returned which is locked and
167  *		referenced in the same variable.
168  *
 *	vpp	is filled in with the result if no error occurred,
170  *		locked and ref'd.
171  *
172  *		If an error is returned, *vpp is set to NULLVP.  If no
173  *		error occurs, *vpp is returned with a reference and an
174  *		exclusive lock.
175  */
176 
static int
union_lookup1(udvp, pdvp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **pdvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp = *pdvp;
	struct vnode *tdvp;
	struct mount *mp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.  udvp marks the top of our namespace; we
	 * never step above it.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			VREF(dvp);
			vput(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}

	/*
	 * Set return dvp to be the upperdvp 'parent directory'.
	 */
	*pdvp = dvp;

	/*
	 * If the VOP_LOOKUP() call generates an error, tdvp is invalid and
	 * no changes will have been made to dvp, so we are set to return.
	 */

        error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error) {
		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
		*vpp = NULL;
		return (error);
	}
	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */

	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		/* Mount is being unmounted or is otherwise busy; retry. */
		if (vfs_busy(mp, 0, 0, td))
			continue;

		/*
		 * vput() below would drop the lock the caller expects to
		 * hold on *pdvp, so remember to re-take it afterwards.
		 */
		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		error = VFS_ROOT(mp, LK_EXCLUSIVE, &dvp, td);

		vfs_unbusy(mp, td);

		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, td);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}
270 
static int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	*ap->a_vpp = NULLVP;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		return (EROFS);
	}

	/*
	 * For any lookups we do, always return with the parent locked.
	 */
	cnp->cn_flags |= LOCKPARENT;

	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	/* Start pessimistic; each layer's lookup overwrites its error. */
	uerror = ENOENT;
	lerror = ENOENT;

	/*
	 * Get a private lock on uppervp and a reference, effectively
	 * taking it out of the union_node's control.
	 *
	 * We must lock upperdvp while holding our lock on dvp
	 * to avoid a deadlock.
	 */
	upperdvp = union_lock_upper(dun, td);

	/*
	 * Do the lookup in the upper level.
	 * If that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * We do not have to worry about the DOTDOT case, we've
		 * already unlocked dvp.
		 */
		UDEBUG(("A %p\n", upperdvp));

		/*
		 * Do the lookup.   We must supply a locked and referenced
		 * upperdvp to the function and will get a new locked and
		 * referenced upperdvp back, with the old having been
		 * dereferenced.
		 *
		 * If an error is returned, uppervp will be NULLVP.  If no
		 * error occurs, uppervp will be the locked and referenced.
		 * Return vnode, or possibly NULL, depending on what is being
		 * requested.  It is possible that the returned uppervp
		 * will be the same as upperdvp.
		 */
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		UDEBUG((
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    uerror,
		    upperdvp,
		    vrefcnt(upperdvp),
		    VOP_ISLOCKED(upperdvp, NULL),
		    uppervp,
		    (uppervp ? vrefcnt(uppervp) : -99),
		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
		));

		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
			error = EROFS;
			goto out;
		}

		/*
		 * Special case: If cn_consume != 0 then skip out.  The result
		 * of the lookup is transferred to our return variable.  If
		 * an error occurred we have to throw away the results.
		 */

		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
				uppervp = NULL;
			}
			goto out;
		}

		/*
		 * Calculate whiteout, fall through.  A whiteout entry (or
		 * an opaque upper directory) hides the lower layer.
		 */

		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				int terror;

				terror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_thread);
				if (terror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	}

	/*
	 * In a similar way to the upper layer, do the lookup
	 * in the lower layer.   This time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */

	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		UDEBUG(("B %p\n", lowerdvp));

		/*
		 * Force only LOOKUPs on the lower node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			/* Lower lookups run with the mount owner's creds. */
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 *
		 * union_lookup1() requires lowervp to be locked on entry,
		 * and it will be unlocked on return.  The ref count will
		 * not change.  On return lowervp doesn't represent anything
		 * to us so we NULL it out.
		 */
		VREF(lowerdvp);
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, td);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
			vrele(lowerdvp);
		else
			vput(lowerdvp);
		lowerdvp = NULL;	/* lowerdvp invalid after vput */

		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
				lowervp = NULL;
			}
			goto out;
		}
	} else {
		UDEBUG(("C %p\n", lowerdvp));
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, td);
				lerror = 0;
			}
		}
	}

	/*
	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
	 *
	 * 1. If both layers returned an error, select the upper layer.
	 *
	 * 2. If the upper layer failed and the bottom layer succeeded,
	 *    two subcases occur:
	 *
	 *	a.  The bottom vnode is not a directory, in which case
	 *	    just return a new union vnode referencing an
	 *	    empty top layer and the existing bottom layer.
	 *
	 *	b.  The bottom vnode is a directory, in which case
	 *	    create a new directory in the top layer and
	 *	    and fall through to case 3.
	 *
	 * 3. If the top layer succeeded, then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
			/*
			 * Oops, uppervp has a problem, we may have to shadow.
			 */
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			if (uerror) {
				error = uerror;
				goto out;
			}
		}
	}

	/*
	 * Must call union_allocvp() with both the upper and lower vnodes
	 * referenced and the upper vnode locked.   ap->a_vpp is returned
	 * referenced and locked.  lowervp, uppervp, and upperdvp are
	 * absorbed by union_allocvp() whether it succeeds or fails.
	 *
	 * upperdvp is the parent directory of uppervp which may be
	 * different, depending on the path, from dvp->un_uppervp.  That's
	 * why it is a separate argument.  Note that it must be unlocked.
	 *
	 * dvp must be locked on entry to the call and will be locked on
	 * return.
	 */

	if (uppervp && uppervp != upperdvp)
		VOP_UNLOCK(uppervp, 0, td);
	if (lowervp)
		VOP_UNLOCK(lowervp, 0, td);
	if (upperdvp)
		VOP_UNLOCK(upperdvp, 0, td);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99));

	/* Ownership passed to union_allocvp(); don't release them below. */
	uppervp = NULL;
	upperdvp = NULL;
	lowervp = NULL;

	/*
	 *	Termination Code
	 *
	 *	- put away any extra junk laying around.  Note that lowervp
	 *	  (if not NULL) will never be the same as *ap->a_vp and
	 *	  neither will uppervp, because when we set that state we
	 *	  NULL-out lowervp or uppervp.  On the other hand, upperdvp
	 *	  may match uppervp or *ap->a_vpp.
	 *
	 *	- relock/unlock dvp if appropriate.
	 */

out:
	if (upperdvp) {
		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
			vrele(upperdvp);
		else
			vput(upperdvp);
	}

	if (uppervp)
		vput(uppervp);

	if (lowervp)
		vput(lowervp);

	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
		((*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99),
		lowervp, uppervp));

	/* Sanity check: a lookup of "." must return the starting directory. */
	if (error == 0 || error == EJUSTRETURN) {
		if (cnp->cn_namelen == 1 &&
		    cnp->cn_nameptr[0] == '.' &&
		    *ap->a_vpp != dvp) {
#ifdef	DIAGNOSTIC
			vprint("union_lookup: vp", *ap->a_vpp);
			vprint("union_lookup: dvp", dvp);
#endif
			panic("union_lookup returning . (%p) != startdir (%p)",
			    *ap->a_vpp, dvp);
		}
	}

	return (error);
}
588 
589 /*
590  * 	union_create:
591  *
592  * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
593  * locked if no error occurs, otherwise it is garbage.
594  */
595 
static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp;
	/* Creation is only possible in the upper layer; fail EROFS without one. */
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			/* union_allocvp() wants the new vnode unlocked. */
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			/* Wrap the new upper vnode in a union node (no lower layer). */
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}
628 
629 static int
630 union_whiteout(ap)
631 	struct vop_whiteout_args /* {
632 		struct vnode *a_dvp;
633 		struct componentname *a_cnp;
634 		int a_flags;
635 	} */ *ap;
636 {
637 	struct union_node *un = VTOUNION(ap->a_dvp);
638 	struct componentname *cnp = ap->a_cnp;
639 	struct vnode *uppervp;
640 	int error;
641 
642 	switch (ap->a_flags) {
643 	case CREATE:
644 	case DELETE:
645 		uppervp = union_lock_upper(un, cnp->cn_thread);
646 		if (uppervp != NULLVP) {
647 			error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
648 			union_unlock_upper(uppervp, cnp->cn_thread);
649 		} else
650 			error = EOPNOTSUPP;
651 		break;
652 	case LOOKUP:
653 		error = EOPNOTSUPP;
654 		break;
655 	default:
656 		panic("union_whiteout: unknown op");
657 	}
658 	return (error);
659 }
660 
661 /*
662  * 	union_mknod:
663  *
664  *	a_dvp is locked on entry and should remain locked on return.
665  *	a_vpp is garbage whether an error occurs or not.
666  */
667 
668 static int
669 union_mknod(ap)
670 	struct vop_mknod_args /* {
671 		struct vnode *a_dvp;
672 		struct vnode **a_vpp;
673 		struct componentname *a_cnp;
674 		struct vattr *a_vap;
675 	} */ *ap;
676 {
677 	struct union_node *dun = VTOUNION(ap->a_dvp);
678 	struct componentname *cnp = ap->a_cnp;
679 	struct vnode *dvp;
680 	int error = EROFS;
681 
682 	if ((dvp = union_lock_upper(dun, cnp->cn_thread)) != NULL) {
683 		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
684 		union_unlock_upper(dvp, cnp->cn_thread);
685 	}
686 	return (error);
687 }
688 
689 /*
690  *	union_open:
691  *
692  *	run open VOP.  When opening the underlying vnode we have to mimic
693  *	vn_open().  What we *really* need to do to avoid screwups if the
694  *	open semantics change is to call vn_open().  For example, ufs blows
695  *	up if you open a file but do not vmio it prior to writing.
696  */
697 
static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* With O_TRUNC the contents are discarded anyway,
			 * so skip copying them up. */
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			/* NOTE(review): if union_copyup() failed, tvp may be
			 * NULL here; the cleanup below tolerates that. */
			tvp = union_lock_upper(un, td);
		} else {
			/* Count lower-layer opens so union_close() can
			 * balance them. */
			un->un_openl++;
			VREF(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it.
	 */

	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, td, -1);
	/* Share the backing vnode's VM object with the union vnode. */
	if (error == 0)
		ap->a_vp->v_object = tvp->v_object;
	/*
	 * Release any locks held.
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}
762 
763 /*
764  *	union_close:
765  *
766  *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
767  *	the case we do not change it.
768  */
769 
static int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	/*
	 * Close the layer that union_open() actually opened: the upper
	 * layer if present, otherwise the lower layer (balancing the
	 * un_openl count taken at open time).
	 */
	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}
	/* Forward the close to the chosen backing vnode. */
	ap->a_vp = vp;
	return (VOP_CLOSE_AP(ap));
}
793 
794 /*
795  * Check access permission on the union vnode.
796  * The access check being enforced is to check
797  * against both the underlying vnode, and any
798  * copied vnode.  This ensures that no additional
799  * file permissions are given away simply because
800  * the user caused an implicit file copy.
801  */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	/* If an upper layer exists, its permissions alone decide. */
	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = vp;
		error = VOP_ACCESS_AP(ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = VOP_ACCESS_AP(ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			/* For "below" mounts also check with the mount
			 * owner's credentials. */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VOP_ACCESS_AP(ap);
			}
		}
		VOP_UNLOCK(vp, 0, td);
	}
	return(error);
}
865 
866 /*
867  * We handle getattr only to change the fsid and
868  * track object sizes
869  *
870  * It's not clear whether VOP_GETATTR is to be
871  * called with the vnode locked or not.  stat() calls
872  * it with (vp) locked, and fstat() calls it with
873  * (vp) unlocked.
874  *
875  * Because of this we cannot use our normal locking functions
876  * if we do not intend to lock the main a_vp node.  At the moment
877  * we are running without any specific locking at all, but beware
878  * to any programmer that care must be taken if locking is added
879  * to this function.
880  */
881 
static int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct union_mount *um = MOUNTTOUNIONMOUNT(ap->a_vp->v_mount);
	struct vnode *vp;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	/* Upper layer attributes (if present) fill the caller's vap. */
	if ((vp = un->un_uppervp) != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	/*
	 * Decide whether the lower layer also needs to be queried:
	 * always when there is no upper layer, and for directories so
	 * the link counts can be merged below.
	 */
	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		vp = un->un_lowervp;
		vap = &va;	/* scratch vattr; don't clobber the upper's */
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	/* Report the union mount's fsid rather than the upper layer's. */
	if (ap->a_vap->va_fsid == um->um_upperdev)
		ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/* Merge directory link counts from both layers (see note above). */
	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
	return (0);
}
941 
static int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
		return (EROFS);
	}

	/*
	 * Handle case of truncating lower object to zero size
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		/* Copy contents only when not truncating to zero. */
		error = union_copyup(un, (ap->a_vap->va_size != 0),
			    ap->a_cred, ap->a_td);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	error = EROFS;
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
					ap->a_cred, ap->a_td);
		/* Keep the union node's cached upper size in sync. */
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, td);
	}
	return (error);
}
994 
995 static int
996 union_read(ap)
997 	struct vop_read_args /* {
998 		struct vnode *a_vp;
999 		struct uio *a_uio;
1000 		int  a_ioflag;
1001 		struct ucred *a_cred;
1002 	} */ *ap;
1003 {
1004 	struct union_node *un = VTOUNION(ap->a_vp);
1005 	struct thread *td = ap->a_uio->uio_td;
1006 	struct vnode *uvp;
1007 	int error;
1008 
1009 	uvp = union_lock_other(un, td);
1010 	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
1011 
1012 	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1013 	union_unlock_other(uvp, td);
1014 
1015 	/*
1016 	 * XXX
1017 	 * Perhaps the size of the underlying object has changed under
1018 	 * our feet.  Take advantage of the offset information present
1019 	 * in the uio structure.
1020 	 */
1021 	if (error == 0) {
1022 		struct union_node *un = VTOUNION(ap->a_vp);
1023 		off_t cur = ap->a_uio->uio_offset;
1024 
1025 		if (uvp == un->un_uppervp) {
1026 			if (cur > un->un_uppersz)
1027 				union_newsize(ap->a_vp, cur, VNOVAL);
1028 		} else {
1029 			if (cur > un->un_lowersz)
1030 				union_newsize(ap->a_vp, VNOVAL, cur);
1031 		}
1032 	}
1033 	return (error);
1034 }
1035 
1036 static int
1037 union_write(ap)
1038 	struct vop_write_args /* {
1039 		struct vnode *a_vp;
1040 		struct uio *a_uio;
1041 		int  a_ioflag;
1042 		struct ucred *a_cred;
1043 	} */ *ap;
1044 {
1045 	struct union_node *un = VTOUNION(ap->a_vp);
1046 	struct thread *td = ap->a_uio->uio_td;
1047 	struct vnode *uppervp;
1048 	int error;
1049 
1050 	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
1051 		panic("union: missing upper layer in write");
1052 
1053 	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1054 
1055 	/*
1056 	 * The size of the underlying object may be changed by the
1057 	 * write.
1058 	 */
1059 	if (error == 0) {
1060 		off_t cur = ap->a_uio->uio_offset;
1061 
1062 		if (cur > un->un_uppersz)
1063 			union_newsize(ap->a_vp, cur, VNOVAL);
1064 	}
1065 	union_unlock_upper(uppervp, td);
1066 	return (error);
1067 }
1068 
1069 static int
1070 union_lease(ap)
1071 	struct vop_lease_args /* {
1072 		struct vnode *a_vp;
1073 		struct thread *a_td;
1074 		struct ucred *a_cred;
1075 		int a_flag;
1076 	} */ *ap;
1077 {
1078 	struct vnode *ovp = OTHERVP(ap->a_vp);
1079 
1080 	ap->a_vp = ovp;
1081 	return (VOP_LEASE_AP(ap));
1082 }
1083 
1084 static int
1085 union_ioctl(ap)
1086 	struct vop_ioctl_args /* {
1087 		struct vnode *a_vp;
1088 		u_long  a_command;
1089 		caddr_t  a_data;
1090 		int  a_fflag;
1091 		struct ucred *a_cred;
1092 		struct thread *a_td;
1093 	} */ *ap;
1094 {
1095 	struct vnode *ovp = OTHERVP(ap->a_vp);
1096 
1097 	ap->a_vp = ovp;
1098 	return (VOP_IOCTL_AP(ap));
1099 }
1100 
1101 static int
1102 union_poll(ap)
1103 	struct vop_poll_args /* {
1104 		struct vnode *a_vp;
1105 		int  a_events;
1106 		struct ucred *a_cred;
1107 		struct thread *a_td;
1108 	} */ *ap;
1109 {
1110 	struct vnode *ovp = OTHERVP(ap->a_vp);
1111 
1112 	ap->a_vp = ovp;
1113 	return (VOP_POLL_AP(ap));
1114 }
1115 
1116 static int
1117 union_fsync(ap)
1118 	struct vop_fsync_args /* {
1119 		struct vnode *a_vp;
1120 		struct ucred *a_cred;
1121 		int  a_waitfor;
1122 		struct thread *a_td;
1123 	} */ *ap;
1124 {
1125 	int error = 0;
1126 	struct thread *td = ap->a_td;
1127 	struct vnode *targetvp;
1128 	struct union_node *un = VTOUNION(ap->a_vp);
1129 
1130 	if ((targetvp = union_lock_other(un, td)) != NULLVP) {
1131 		error = VOP_FSYNC(targetvp, ap->a_waitfor, td);
1132 		union_unlock_other(targetvp, td);
1133 	}
1134 
1135 	return (error);
1136 }
1137 
1138 /*
1139  *	union_remove:
1140  *
1141  *	Remove the specified cnp.  The dvp and vp are passed to us locked
1142  *	and must remain locked on return.
1143  */
1144 
static int
union_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *uppervp;
	struct vnode *upperdvp;
	int error;

	/*
	 * Removals only ever touch the upper filesystem, so the parent
	 * directory must have an upper vnode.
	 */
	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union remove: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		/*
		 * File exists in the upper layer: remove it there,
		 * requesting a whiteout when a lower copy would
		 * otherwise show through afterwards.
		 */
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_REMOVE(upperdvp, uppervp, cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		/*
		 * Lower-layer-only file: hide the name by creating a
		 * whiteout entry in the upper directory.
		 */
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    upperdvp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}
1182 
1183 /*
1184  *	union_link:
1185  *
1186  *	tdvp and vp will be locked on entry.
1187  *	tdvp and vp should remain locked on return.
1188  */
1189 
static int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	/*
	 * Pick the source vnode to link from.  If a_vp is not a union
	 * vnode (different vop vector), use it directly; otherwise use
	 * the union node's upper vnode, copying the file up first if it
	 * only exists in the lower layer.
	 */
	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);

		if (tun->un_uppervp == NULLVP) {
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					VOP_UNLOCK(dun->un_uppervp, 0, td);
				}
			}
#endif
			error = union_copyup(tun, 1, cnp->cn_cred, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					    LK_EXCLUSIVE | LK_RETRY, td);
				dun->un_flags |= UN_ULOCK;
			}
#endif
			if (error)
				return (error);
		}
		vp = tun->un_uppervp;
		/* Lock the upper source vnode for the VOP_LINK() call. */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/*
	 * Make sure upper is locked, then unlock the union directory we were
	 * called with to avoid a deadlock while we are calling VOP_LINK() on
	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
	 * is expected to be locked on return.
	 */

	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
		return (EROFS);

	VOP_UNLOCK(ap->a_tdvp, 0, td);		/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */

	/*
	 * Unlock tun->un_uppervp if we locked it above.
	 */
	if (ap->a_tdvp->v_op == ap->a_vp->v_op)
		VOP_UNLOCK(vp, 0, td);
	/*
	 * We have to unlock tdvp prior to relocking our calling node in
	 * order to avoid a deadlock.  We also have to unlock ap->a_vp
	 * before relocking the directory, but then we have to relock
	 * ap->a_vp as our caller expects.
	 */
	VOP_UNLOCK(ap->a_vp, 0, td);
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, td);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}
1264 
/*
 *	union_rename:
 *
 *	Remap each union vnode of the rename (fdvp, fvp, tdvp, tvp) to
 *	its upper-layer vnode and hand the operation to the upper
 *	filesystem.  On success VOP_RENAME() consumes all references
 *	and locks; on the error path we release them ourselves.
 */
static int
union_rename(ap)
	struct vop_rename_args  /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */

	if (fdvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */

	if (fvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				/*
				 * Regular file living only in the lower
				 * layer: copy it up so the rename can be
				 * performed entirely in the upper layer.
				 */
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_thread);
				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_mkshadow(um, fdvp,
					    ap->a_fcnp, &un->un_uppervp);
				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_thread);
				if (un->un_uppervp)
					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		/*
		 * If a lower copy of the source exists, ask the upper
		 * layer to whiteout the old name so the lower copy does
		 * not reappear after the rename.
		 */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;
		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transfered to what we will pass
	 * down in the VOP_RENAME() and we replace uppervp with a simple
	 * reference.
	 */

	if (tdvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * This should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * New tdvp is a lock and reference on uppervp.
		 * Put away the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist, put away the (wrong)
	 * file and change tvp to NULL.
	 */

	if (tvp != NULLVP && tvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME() releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */

bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}
1429 
static int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	int error = EROFS;	/* default when no writable upper layer */

	/*
	 * Create the directory in the upper layer, then wrap the new
	 * upper vnode in a union node returned through a_vpp.
	 */
	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
		struct vnode *vp;

		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, td);

		if (error == 0) {
			/* Drop the lock VOP_MKDIR() returned vp with. */
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
	}
	return (error);
}
1461 
static int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	struct vnode *uppervp;
	int error;

	/*
	 * Directory removal mirrors union_remove(): it only ever
	 * operates on the upper filesystem, so the parent must have an
	 * upper vnode.
	 */
	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		/*
		 * Directory exists in the upper layer: remove it there,
		 * whiting-out the name if a lower copy would otherwise
		 * show through.
		 */
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		/*
		 * Lower-layer-only directory: hide the name with a
		 * whiteout in the upper directory.
		 */
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    dun->un_uppervp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}
1499 
1500 /*
1501  *	union_symlink:
1502  *
1503  *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
1504  *	(unused).
1505  */
1506 
1507 static int
1508 union_symlink(ap)
1509 	struct vop_symlink_args /* {
1510 		struct vnode *a_dvp;
1511 		struct vnode **a_vpp;
1512 		struct componentname *a_cnp;
1513 		struct vattr *a_vap;
1514 		char *a_target;
1515 	} */ *ap;
1516 {
1517 	struct union_node *dun = VTOUNION(ap->a_dvp);
1518 	struct componentname *cnp = ap->a_cnp;
1519 	struct thread *td = cnp->cn_thread;
1520 	struct vnode *dvp;
1521 	int error = EROFS;
1522 
1523 	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
1524 		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1525 			    ap->a_target);
1526 		union_unlock_upper(dvp, td);
1527 	}
1528 	return (error);
1529 }
1530 
1531 /*
1532  * union_readdir ()works in concert with getdirentries() and
1533  * readdir(3) to provide a list of entries in the unioned
1534  * directories.  getdirentries()  is responsible for walking
1535  * down the union stack.  readdir(3) is responsible for
1536  * eliminating duplicate names from the returned data stream.
1537  */
1538 static int
1539 union_readdir(ap)
1540 	struct vop_readdir_args /* {
1541 		struct vnode *a_vp;
1542 		struct uio *a_uio;
1543 		struct ucred *a_cred;
1544 		int *a_eofflag;
1545 		u_long *a_cookies;
1546 		int a_ncookies;
1547 	} */ *ap;
1548 {
1549 	struct union_node *un = VTOUNION(ap->a_vp);
1550 	struct thread *td = ap->a_uio->uio_td;
1551 	struct vnode *uvp;
1552 	int error = 0;
1553 
1554 	if ((uvp = union_lock_upper(un, td)) != NULLVP) {
1555 		ap->a_vp = uvp;
1556 		error = VOP_READDIR_AP(ap);
1557 		union_unlock_upper(uvp, td);
1558 	}
1559 	return(error);
1560 }
1561 
1562 static int
1563 union_readlink(ap)
1564 	struct vop_readlink_args /* {
1565 		struct vnode *a_vp;
1566 		struct uio *a_uio;
1567 		struct ucred *a_cred;
1568 	} */ *ap;
1569 {
1570 	int error;
1571 	struct union_node *un = VTOUNION(ap->a_vp);
1572 	struct uio *uio = ap->a_uio;
1573 	struct thread *td = uio->uio_td;
1574 	struct vnode *vp;
1575 
1576 	vp = union_lock_other(un, td);
1577 	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1578 
1579 	ap->a_vp = vp;
1580 	error = VOP_READLINK_AP(ap);
1581 	union_unlock_other(vp, td);
1582 
1583 	return (error);
1584 }
1585 
static int
union_getwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *uvp = UPPERVP(vp);

	/*
	 * Only the upper layer can supply a writable mount.  With no
	 * upper vnode we distinguish a vnode marked VI_FREE (answer is
	 * meaningless, EOPNOTSUPP) from a live lower-only vnode
	 * (writes are not possible, EACCES).
	 */
	if (uvp == NULL) {
		VI_LOCK(vp);
		if (vp->v_iflag & VI_FREE) {
			VI_UNLOCK(vp);
			return (EOPNOTSUPP);
		}
		VI_UNLOCK(vp);
		return (EACCES);
	}
	return(VOP_GETWRITEMOUNT(uvp, ap->a_mpp));
}
1607 
1608 /*
1609  *	union_inactive:
1610  *
1611  *	Called with the vnode locked.  We are expected to unlock the vnode.
1612  */
1613 
static int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 */

	/* Release any cached directory-scan vnodes. */
	if (un->un_dircache != NULL)
		union_dircache_free(un);

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, 0, td);
	}
#endif

	/*
	 * A node no longer in the union-node cache cannot be reused;
	 * force it to be reclaimed now.
	 */
	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}
1647 
static int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	/* Tear down the union_node attached to this vnode. */
	union_freevp(ap->a_vp);

	return (0);
}
1658 
1659 static int
1660 union_print(ap)
1661 	struct vop_print_args /* {
1662 		struct vnode *a_vp;
1663 	} */ *ap;
1664 {
1665 	struct vnode *vp = ap->a_vp;
1666 
1667 	printf("\tvp=%p, uppervp=%p, lowervp=%p\n",
1668 	       vp, UPPERVP(vp), LOWERVP(vp));
1669 	if (UPPERVP(vp) != NULLVP)
1670 		vprint("union: upper", UPPERVP(vp));
1671 	if (LOWERVP(vp) != NULLVP)
1672 		vprint("union: lower", LOWERVP(vp));
1673 
1674 	return (0);
1675 }
1676 
1677 static int
1678 union_pathconf(ap)
1679 	struct vop_pathconf_args /* {
1680 		struct vnode *a_vp;
1681 		int a_name;
1682 		int *a_retval;
1683 	} */ *ap;
1684 {
1685 	int error;
1686 	struct thread *td = curthread;		/* XXX */
1687 	struct union_node *un = VTOUNION(ap->a_vp);
1688 	struct vnode *vp;
1689 
1690 	vp = union_lock_other(un, td);
1691 	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1692 
1693 	ap->a_vp = vp;
1694 	error = VOP_PATHCONF_AP(ap);
1695 	union_unlock_other(vp, td);
1696 
1697 	return (error);
1698 }
1699 
1700 static int
1701 union_advlock(ap)
1702 	struct vop_advlock_args /* {
1703 		struct vnode *a_vp;
1704 		caddr_t  a_id;
1705 		int  a_op;
1706 		struct flock *a_fl;
1707 		int  a_flags;
1708 	} */ *ap;
1709 {
1710 	register struct vnode *ovp = OTHERVP(ap->a_vp);
1711 
1712 	ap->a_vp = ovp;
1713 	return (VOP_ADVLOCK_AP(ap));
1714 }
1715 
1716 
1717 /*
1718  * XXX - vop_strategy must be hand coded because it has no
1719  * YYY - and it is not coherent with anything
1720  *
1721  * vnode in its arguments.
1722  * This goes away with a merged VM/buffer cache.
1723  */
static int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	struct vnode *othervp = OTHERVP(ap->a_vp);

#ifdef DIAGNOSTIC
	/* Sanity: there must be a backing vnode ... */
	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
	/* ... and writes must never be directed at the lower layer. */
	if ((bp->b_iocmd == BIO_WRITE) &&
	    (othervp == LOWERVP(ap->a_vp)))
		panic("union_strategy: writing to lowervp");
#endif
	return (VOP_STRATEGY(othervp, bp));
}
1743 
1744 static int
1745 union_getacl(ap)
1746 	struct vop_getacl_args /* {
1747 		struct vnode *a_vp;
1748 		acl_type_t a_type;
1749 		struct acl *a_aclp;
1750 		struct ucred *a_cred;
1751 		struct thread *a_td;
1752 	} */ *ap;
1753 {
1754 	int error;
1755 	struct union_node *un = VTOUNION(ap->a_vp);
1756 	struct vnode *vp;
1757 
1758 	vp = union_lock_other(un, ap->a_td);
1759 	ap->a_vp = vp;
1760 	error = VOP_GETACL_AP(ap);
1761 	union_unlock_other(vp, ap->a_td);
1762 
1763 	return (error);
1764 }
1765 
1766 static int
1767 union_setacl(ap)
1768 	struct vop_setacl_args /* {
1769 		struct vnode *a_vp;
1770 		acl_type_t a_type;
1771 		struct acl *a_aclp;
1772 		struct ucred *a_cred;
1773 		struct thread *a_td;
1774 	} */ *ap;
1775 {
1776 	int error;
1777 	struct union_node *un = VTOUNION(ap->a_vp);
1778 	struct vnode *vp;
1779 
1780 	vp = union_lock_other(un, ap->a_td);
1781 	ap->a_vp = vp;
1782 	error = VOP_SETACL_AP(ap);
1783 	union_unlock_other(vp, ap->a_td);
1784 
1785 	return (error);
1786 }
1787 
1788 static int
1789 union_aclcheck(ap)
1790 	struct vop_aclcheck_args /* {
1791 		struct vnode *a_vp;
1792 		acl_type_t a_type;
1793 		struct acl *a_aclp;
1794 		struct ucred *a_cred;
1795 		struct thread *a_td;
1796 	} */ *ap;
1797 {
1798 	struct vnode *ovp = OTHERVP(ap->a_vp);
1799 
1800 	ap->a_vp = ovp;
1801 	return (VOP_ACLCHECK_AP(ap));
1802 }
1803 
1804 static int
1805 union_closeextattr(ap)
1806 	struct vop_closeextattr_args /* {
1807 		struct vnode *a_vp;
1808 		int a_commit;
1809 		struct ucred *a_cred;
1810 		struct thread *a_td;
1811 	} */ *ap;
1812 {
1813 	int error;
1814 	struct union_node *un = VTOUNION(ap->a_vp);
1815 	struct vnode *vp;
1816 
1817 	vp = union_lock_other(un, ap->a_td);
1818 	ap->a_vp = vp;
1819 	error = VOP_CLOSEEXTATTR_AP(ap);
1820 	union_unlock_other(vp, ap->a_td);
1821 
1822 	return (error);
1823 }
1824 
1825 static int
1826 union_getextattr(ap)
1827 	struct vop_getextattr_args /* {
1828 		struct vnode *a_vp;
1829 		int a_attrnamespace;
1830 		const char *a_name;
1831 		struct uio *a_uio;
1832 		size_t *a_size;
1833 		struct ucred *a_cred;
1834 		struct thread *a_td;
1835 	} */ *ap;
1836 {
1837 	int error;
1838 	struct union_node *un = VTOUNION(ap->a_vp);
1839 	struct vnode *vp;
1840 
1841 	vp = union_lock_other(un, ap->a_td);
1842 	ap->a_vp = vp;
1843 	error = VOP_GETEXTATTR_AP(ap);
1844 	union_unlock_other(vp, ap->a_td);
1845 
1846 	return (error);
1847 }
1848 
1849 static int
1850 union_listextattr(ap)
1851 	struct vop_listextattr_args /* {
1852 		struct vnode *a_vp;
1853 		int a_attrnamespace;
1854 		struct uio *a_uio;
1855 		size_t *a_size;
1856 		struct ucred *a_cred;
1857 		struct thread *a_td;
1858 	} */ *ap;
1859 {
1860 	int error;
1861 	struct union_node *un = VTOUNION(ap->a_vp);
1862 	struct vnode *vp;
1863 
1864 	vp = union_lock_other(un, ap->a_td);
1865 	ap->a_vp = vp;
1866 	error = VOP_LISTEXTATTR_AP(ap);
1867 	union_unlock_other(vp, ap->a_td);
1868 
1869 	return (error);
1870 }
1871 
1872 static int
1873 union_openextattr(ap)
1874 	struct vop_openextattr_args /* {
1875 		struct vnode *a_vp;
1876 		struct ucred *a_cred;
1877 		struct thread *a_td;
1878 	} */ *ap;
1879 {
1880 	int error;
1881 	struct union_node *un = VTOUNION(ap->a_vp);
1882 	struct vnode *vp;
1883 
1884 	vp = union_lock_other(un, ap->a_td);
1885 	ap->a_vp = vp;
1886 	error = VOP_OPENEXTATTR_AP(ap);
1887 	union_unlock_other(vp, ap->a_td);
1888 
1889 	return (error);
1890 }
1891 
1892 static int
1893 union_deleteextattr(ap)
1894 	struct vop_deleteextattr_args /* {
1895 		struct vnode *a_vp;
1896 		int a_attrnamespace;
1897 		const char *a_name;
1898 		struct ucred *a_cred;
1899 		struct thread *a_td;
1900 	} */ *ap;
1901 {
1902 	int error;
1903 	struct union_node *un = VTOUNION(ap->a_vp);
1904 	struct vnode *vp;
1905 
1906 	vp = union_lock_other(un, ap->a_td);
1907 	ap->a_vp = vp;
1908 	error = VOP_DELETEEXTATTR_AP(ap);
1909 	union_unlock_other(vp, ap->a_td);
1910 
1911 	return (error);
1912 }
1913 
1914 static int
1915 union_setextattr(ap)
1916 	struct vop_setextattr_args /* {
1917 		struct vnode *a_vp;
1918 		int a_attrnamespace;
1919 		const char *a_name;
1920 		struct uio *a_uio;
1921 		struct ucred *a_cred;
1922 		struct thread *a_td;
1923 	} */ *ap;
1924 {
1925 	int error;
1926 	struct union_node *un = VTOUNION(ap->a_vp);
1927 	struct vnode *vp;
1928 
1929 	vp = union_lock_other(un, ap->a_td);
1930 	ap->a_vp = vp;
1931 	error = VOP_SETEXTATTR_AP(ap);
1932 	union_unlock_other(vp, ap->a_td);
1933 
1934 	return (error);
1935 }
1936 
1937 static int
1938 union_setlabel(ap)
1939 	struct vop_setlabel_args /* {
1940 		struct vnode *a_vp;
1941 		struct label *a_label;
1942 		struct ucred *a_cred;
1943 		struct thread *a_td;
1944 	} */ *ap;
1945 {
1946 	int error;
1947 	struct union_node *un = VTOUNION(ap->a_vp);
1948 	struct vnode *vp;
1949 
1950 	vp = union_lock_other(un, ap->a_td);
1951 	ap->a_vp = vp;
1952 	error = VOP_SETLABEL_AP(ap);
1953 	union_unlock_other(vp, ap->a_td);
1954 
1955 	return (error);
1956 }
1957 
1958 /*
1959  * Global vfs data structures
1960  */
1961 struct vop_vector union_vnodeops = {
1962 	.vop_default =		&default_vnodeops,
1963 
1964 	.vop_access =		union_access,
1965 	.vop_aclcheck =		union_aclcheck,
1966 	.vop_advlock =		union_advlock,
1967 	.vop_bmap =		VOP_EOPNOTSUPP,
1968 	.vop_close =		union_close,
1969 	.vop_closeextattr =	union_closeextattr,
1970 	.vop_create =		union_create,
1971 	.vop_deleteextattr =	union_deleteextattr,
1972 	.vop_fsync =		union_fsync,
1973 	.vop_getacl =		union_getacl,
1974 	.vop_getattr =		union_getattr,
1975 	.vop_getextattr =	union_getextattr,
1976 	.vop_getwritemount =	union_getwritemount,
1977 	.vop_inactive =		union_inactive,
1978 	.vop_ioctl =		union_ioctl,
1979 	.vop_lease =		union_lease,
1980 	.vop_link =		union_link,
1981 	.vop_listextattr =	union_listextattr,
1982 	.vop_lookup =		union_lookup,
1983 	.vop_mkdir =		union_mkdir,
1984 	.vop_mknod =		union_mknod,
1985 	.vop_open =		union_open,
1986 	.vop_openextattr =	union_openextattr,
1987 	.vop_pathconf =		union_pathconf,
1988 	.vop_poll =		union_poll,
1989 	.vop_print =		union_print,
1990 	.vop_read =		union_read,
1991 	.vop_readdir =		union_readdir,
1992 	.vop_readlink =		union_readlink,
1993 	.vop_reclaim =		union_reclaim,
1994 	.vop_remove =		union_remove,
1995 	.vop_rename =		union_rename,
1996 	.vop_rmdir =		union_rmdir,
1997 	.vop_setacl =		union_setacl,
1998 	.vop_setattr =		union_setattr,
1999 	.vop_setextattr =	union_setextattr,
2000 	.vop_setlabel =		union_setlabel,
2001 	.vop_strategy =		union_strategy,
2002 	.vop_symlink =		union_symlink,
2003 	.vop_whiteout =		union_whiteout,
2004 	.vop_write =		union_write,
2005 };
2006