xref: /freebsd/sys/fs/unionfs/union_vnops.c (revision aec0fb7b40e4cf877bea663f2d86dd07c3524fe8)
1 /*
2  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3  * Copyright (c) 1992, 1993, 1994, 1995
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
34  * $FreeBSD$
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/fcntl.h>
40 #include <sys/stat.h>
41 #include <sys/kernel.h>
42 #include <sys/vnode.h>
43 #include <sys/mount.h>
44 #include <sys/namei.h>
45 #include <sys/malloc.h>
46 #include <sys/bio.h>
47 #include <sys/buf.h>
48 #include <sys/lock.h>
49 #include <sys/sysctl.h>
50 #include <sys/unistd.h>
51 #include <sys/acl.h>
52 #include <sys/event.h>
53 #include <sys/extattr.h>
54 #include <sys/mac.h>
55 #include <fs/unionfs/union.h>
56 
57 #include <vm/vm.h>
58 #include <vm/vnode_pager.h>
59 
60 #include <vm/vm_page.h>
61 #include <vm/vm_object.h>
62 
63 int uniondebug = 0;
64 
65 #if UDEBUG_ENABLED
66 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
67 #else
68 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
69 #endif
70 
71 static vop_access_t	union_access;
72 static vop_aclcheck_t	union_aclcheck;
73 static vop_advlock_t	union_advlock;
74 static vop_close_t	union_close;
75 static vop_closeextattr_t	union_closeextattr;
76 static vop_create_t	union_create;
77 static vop_createvobject_t	union_createvobject;
78 static vop_deleteextattr_t	union_deleteextattr;
79 static vop_destroyvobject_t	union_destroyvobject;
80 static vop_fsync_t	union_fsync;
81 static vop_getacl_t	union_getacl;
82 static vop_getattr_t	union_getattr;
83 static vop_getextattr_t	union_getextattr;
84 static vop_getvobject_t	union_getvobject;
85 static vop_inactive_t	union_inactive;
86 static vop_ioctl_t	union_ioctl;
87 static vop_lease_t	union_lease;
88 static vop_link_t	union_link;
89 static vop_listextattr_t	union_listextattr;
90 static vop_lookup_t	union_lookup;
91 static int	union_lookup1(struct vnode *udvp, struct vnode **dvp,
92 				   struct vnode **vpp,
93 				   struct componentname *cnp);
94 static vop_mkdir_t	union_mkdir;
95 static vop_mknod_t	union_mknod;
96 static vop_open_t	union_open;
97 static vop_openextattr_t	union_openextattr;
98 static vop_pathconf_t	union_pathconf;
99 static vop_print_t	union_print;
100 static vop_read_t	union_read;
101 static vop_readdir_t	union_readdir;
102 static vop_readlink_t	union_readlink;
103 static vop_getwritemount_t	union_getwritemount;
104 static vop_reclaim_t	union_reclaim;
105 static vop_remove_t	union_remove;
106 static vop_rename_t	union_rename;
107 static vop_rmdir_t	union_rmdir;
108 static vop_poll_t	union_poll;
109 static vop_setacl_t	union_setacl;
110 static vop_setattr_t	union_setattr;
111 static vop_setlabel_t	union_setlabel;
112 static vop_setextattr_t	union_setextattr;
113 static vop_strategy_t	union_strategy;
114 static vop_symlink_t	union_symlink;
115 static vop_whiteout_t	union_whiteout;
116 static vop_write_t	union_write;
117 
static __inline
struct vnode *
union_lock_upper(struct union_node *un, struct thread *td)
{
	struct vnode *uppervp;

	/*
	 * Return the union node's upper vnode with a private reference
	 * and an exclusive (recursion-capable) lock, effectively taking
	 * it out of the union_node's control, or NULL if there is no
	 * upper layer.  Release with union_unlock_upper().
	 */
	if ((uppervp = un->un_uppervp) != NULL) {
		VREF(uppervp);
		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
	}
	KASSERT((uppervp == NULL || vrefcnt(uppervp) > 0), ("uppervp usecount is 0"));
	return(uppervp);
}
131 
/*
 * Drop the lock and reference acquired by union_lock_upper().
 */
static __inline
void
union_unlock_upper(struct vnode *uvp, struct thread *td)
{
	vput(uvp);
}
138 
static __inline
struct vnode *
union_lock_other(struct union_node *un, struct thread *td)
{
	struct vnode *vp;

	/*
	 * Lock and reference the vnode backing this union node:
	 * the upper vnode takes precedence, otherwise fall back to
	 * the lower vnode.  Returns NULL only when the node has
	 * neither layer.  Release with union_unlock_other().
	 */
	if (un->un_uppervp != NULL) {
		vp = union_lock_upper(un, td);
	} else if ((vp = un->un_lowervp) != NULL) {
		VREF(vp);
		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
	}
	return(vp);
}
153 
/*
 * Drop the lock and reference acquired by union_lock_other().
 */
static __inline
void
union_unlock_other(struct vnode *ovp, struct thread *td)
{
	vput(ovp);
}
160 
161 /*
162  *	union_lookup:
163  *
164  *	udvp	must be exclusively locked on call and will remain
165  *		exclusively locked on return.  This is the mount point
166  *		for our filesystem.
167  *
168  *	dvp	Our base directory, locked and referenced.
169  *		The passed dvp will be dereferenced and unlocked on return
170  *		and a new dvp will be returned which is locked and
171  *		referenced in the same variable.
172  *
 *	vpp	is filled in with the result if no error occurred,
174  *		locked and ref'd.
175  *
176  *		If an error is returned, *vpp is set to NULLVP.  If no
177  *		error occurs, *vpp is returned with a reference and an
178  *		exclusive lock.
179  */
180 
static int
union_lookup1(udvp, pdvp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **pdvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp = *pdvp;
	struct vnode *tdvp;
	struct mount *mp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			VREF(dvp);
			vput(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}

	/*
	 * Set return dvp to be the upperdvp parent directory.
	 */
	*pdvp = dvp;

	/*
	 * If the VOP_LOOKUP() call generates an error, tdvp is invalid and
	 * no changes will have been made to dvp, so we are set to return.
	 */

        error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error) {
		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
		*vpp = NULL;
		return (error);
	}

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component or if dvp == tdvp (tdvp must be locked).
	 *
	 * We want our dvp to remain locked and ref'd.  We also want tdvp
	 * to remain locked and ref'd.
	 */
	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));

	if (dvp != tdvp && (cnp->cn_flags & ISLASTCN) == 0)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */

	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		/* Retry if we could not busy the covering mount. */
		if (vfs_busy(mp, 0, 0, td))
			continue;

		/*
		 * vput()ing dvp below may drop the lock on *pdvp when
		 * they are the same vnode; remember to re-take it.
		 */
		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		error = VFS_ROOT(mp, &dvp, td);

		vfs_unbusy(mp, td);

		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, td);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}
285 
static int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	int lockparent = cnp->cn_flags & LOCKPARENT;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	*ap->a_vpp = NULLVP;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		return (EROFS);
	}

	/*
	 * For any lookups we do, always return with the parent locked.
	 */
	cnp->cn_flags |= LOCKPARENT;

	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	uerror = ENOENT;
	lerror = ENOENT;

	/*
	 * Get a private lock on uppervp and a reference, effectively
	 * taking it out of the union_node's control.
	 *
	 * We must lock upperdvp while holding our lock on dvp
	 * to avoid a deadlock.
	 */
	upperdvp = union_lock_upper(dun, td);

	/*
	 * Do the lookup in the upper level.
	 * If that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * We do not have to worry about the DOTDOT case, we've
		 * already unlocked dvp.
		 */
		UDEBUG(("A %p\n", upperdvp));

		/*
		 * Do the lookup.   We must supply a locked and referenced
		 * upperdvp to the function and will get a new locked and
		 * referenced upperdvp back, with the old having been
		 * dereferenced.
		 *
		 * If an error is returned, uppervp will be NULLVP.  If no
		 * error occurs, uppervp will be the locked and referenced
		 * return vnode, or possibly NULL, depending on what is being
		 * requested.  It is possible that the returned uppervp
		 * will be the same as upperdvp.
		 */
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		UDEBUG((
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    uerror,
		    upperdvp,
		    vrefcnt(upperdvp),
		    VOP_ISLOCKED(upperdvp, NULL),
		    uppervp,
		    (uppervp ? vrefcnt(uppervp) : -99),
		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
		));

		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
			error = EROFS;
			goto out;
		}

		/*
		 * Special case: If cn_consume != 0 then skip out.  The result
		 * of the lookup is transferred to our return variable.  If
		 * an error occurred we have to throw away the results.
		 */

		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
				uppervp = NULL;
			}
			goto out;
		}

		/*
		 * Calculate whiteout, fall through.  A whiteout hides the
		 * lower layer entry; an OPAQUE upper directory hides the
		 * entire lower directory.
		 */

		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				int terror;

				terror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_thread);
				if (terror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	}

	/*
	 * In a similar way to the upper layer, do the lookup
	 * in the lower layer.   This time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */

	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		UDEBUG(("B %p\n", lowerdvp));

		/*
		 * Force only LOOKUPs on the lower node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 *
		 * union_lookup1() requires lowervp to be locked on entry,
		 * and it will be unlocked on return.  The ref count will
		 * not change.  On return lowervp doesn't represent anything
		 * to us so we NULL it out.
		 */
		VREF(lowerdvp);
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, td);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
			vrele(lowerdvp);
		else
			vput(lowerdvp);
		lowerdvp = NULL;	/* lowerdvp invalid after vput */

		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
				lowervp = NULL;
			}
			goto out;
		}
	} else {
		UDEBUG(("C %p\n", lowerdvp));
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, td);
				lerror = 0;
			}
		}
	}

	/*
	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
	 *
	 * 1. If both layers returned an error, select the upper layer.
	 *
	 * 2. If the upper layer failed and the bottom layer succeeded,
	 *    two subcases occur:
	 *
	 *	a.  The bottom vnode is not a directory, in which case
	 *	    just return a new union vnode referencing an
	 *	    empty top layer and the existing bottom layer.
	 *
	 *	b.  The bottom vnode is a directory, in which case
	 *	    create a new directory in the top layer and
	 *	    fall through to case 3.
	 *
	 * 3. If the top layer succeeded, then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
			/*
			 * Oops, uppervp has a problem, we may have to shadow.
			 */
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			if (uerror) {
				error = uerror;
				goto out;
			}
		}
	}

	/*
	 * Must call union_allocvp() with both the upper and lower vnodes
	 * referenced and the upper vnode locked.   ap->a_vpp is returned
	 * referenced and locked.  lowervp, uppervp, and upperdvp are
	 * absorbed by union_allocvp() whether it succeeds or fails.
	 *
	 * upperdvp is the parent directory of uppervp which may be
	 * different, depending on the path, from dvp->un_uppervp.  That's
	 * why it is a separate argument.  Note that it must be unlocked.
	 *
	 * dvp must be locked on entry to the call and will be locked on
	 * return.
	 */

	if (uppervp && uppervp != upperdvp)
		VOP_UNLOCK(uppervp, 0, td);
	if (lowervp)
		VOP_UNLOCK(lowervp, 0, td);
	if (upperdvp)
		VOP_UNLOCK(upperdvp, 0, td);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99));

	uppervp = NULL;
	upperdvp = NULL;
	lowervp = NULL;

	/*
	 *	Termination Code
	 *
	 *	- put away any extra junk lying around.  Note that lowervp
	 *	  (if not NULL) will never be the same as *ap->a_vp and
	 *	  neither will uppervp, because when we set that state we
	 *	  NULL-out lowervp or uppervp.  On the other hand, upperdvp
	 *	  may match uppervp or *ap->a_vpp.
	 *
	 *	- relock/unlock dvp if appropriate.
	 */

out:
	if (upperdvp) {
		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
			vrele(upperdvp);
		else
			vput(upperdvp);
	}

	if (uppervp)
		vput(uppervp);

	if (lowervp)
		vput(lowervp);

	/*
	 * Restore LOCKPARENT state
	 */

	if (!lockparent)
		cnp->cn_flags &= ~LOCKPARENT;

	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
		((*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99),
		lowervp, uppervp));

	if (error == 0 || error == EJUSTRETURN) {
		/*
		 * dvp lock state, determine whether to relock dvp.
		 * We are expected to unlock dvp unless:
		 *
		 *	- there was an error (other than EJUSTRETURN), or
		 *	- we hit the last component and lockparent is true
		 */
		if (*ap->a_vpp != dvp) {
			if (!lockparent || (cnp->cn_flags & ISLASTCN) == 0)
				VOP_UNLOCK(dvp, 0, td);
		}

		if (cnp->cn_namelen == 1 &&
		    cnp->cn_nameptr[0] == '.' &&
		    *ap->a_vpp != dvp) {
#ifdef	DIAGNOSTIC
			vprint("union_lookup: vp", *ap->a_vpp);
			vprint("union_lookup: dvp", dvp);
#endif
			panic("union_lookup returning . (%p) != startdir (%p)",
			    *ap->a_vpp, dvp);
		}
	}

	return (error);
}
623 
624 /*
625  * 	union_create:
626  *
627  * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
628  * locked if no error occurs, otherwise it is garbage.
629  */
630 
static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp;
	int error = EROFS;	/* default: no upper layer to create in */

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		/*
		 * Create the object in the upper layer, then wrap the
		 * new upper vnode in a union node.  The new vnode is
		 * unlocked before union_allocvp(), which absorbs the
		 * reference whether it succeeds or fails.
		 */
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}
663 
664 static int
665 union_whiteout(ap)
666 	struct vop_whiteout_args /* {
667 		struct vnode *a_dvp;
668 		struct componentname *a_cnp;
669 		int a_flags;
670 	} */ *ap;
671 {
672 	struct union_node *un = VTOUNION(ap->a_dvp);
673 	struct componentname *cnp = ap->a_cnp;
674 	struct vnode *uppervp;
675 	int error;
676 
677 	switch (ap->a_flags) {
678 	case CREATE:
679 	case DELETE:
680 		uppervp = union_lock_upper(un, cnp->cn_thread);
681 		if (uppervp != NULLVP) {
682 			error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
683 			union_unlock_upper(uppervp, cnp->cn_thread);
684 		} else
685 			error = EOPNOTSUPP;
686 		break;
687 	case LOOKUP:
688 		error = EOPNOTSUPP;
689 		break;
690 	default:
691 		panic("union_whiteout: unknown op");
692 	}
693 	return (error);
694 }
695 
696 /*
697  * 	union_mknod:
698  *
699  *	a_dvp is locked on entry and should remain locked on return.
 *	a_vpp is garbage whether an error occurs or not.
701  */
702 
703 static int
704 union_mknod(ap)
705 	struct vop_mknod_args /* {
706 		struct vnode *a_dvp;
707 		struct vnode **a_vpp;
708 		struct componentname *a_cnp;
709 		struct vattr *a_vap;
710 	} */ *ap;
711 {
712 	struct union_node *dun = VTOUNION(ap->a_dvp);
713 	struct componentname *cnp = ap->a_cnp;
714 	struct vnode *dvp;
715 	int error = EROFS;
716 
717 	if ((dvp = union_lock_upper(dun, cnp->cn_thread)) != NULL) {
718 		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
719 		union_unlock_upper(dvp, cnp->cn_thread);
720 	}
721 	return (error);
722 }
723 
724 /*
725  *	union_open:
726  *
727  *	run open VOP.  When opening the underlying vnode we have to mimic
728  *	vn_open().  What we *really* need to do to avoid screwups if the
729  *	open semantics change is to call vn_open().  For example, ufs blows
730  *	up if you open a file but do not vmio it prior to writing.
731  */
732 
static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/*
			 * With O_TRUNC there is no point copying the old
			 * contents up, so docopy is 0 in that case.
			 */
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			tvp = union_lock_upper(un, td);
		} else {
			/* Count opens against the lower layer. */
			un->un_openl++;
			VREF(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it.
	 */

	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, td, -1);

	/*
	 * This is absolutely necessary or UFS will blow up.
	 */
        if (error == 0 && vn_canvmio(tvp) == TRUE) {
                error = vfs_object_create(tvp, td, cred);
        }

	/*
	 * Release any locks held.
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}
803 
804 /*
805  *	union_close:
806  *
807  *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
808  *	the case we do not change it.
809  */
810 
static int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	/*
	 * Forward the close to whichever layer is backing this node:
	 * the upper vnode if present, otherwise the lower vnode, in
	 * which case the un_openl count taken in union_open() is
	 * balanced here.
	 */
	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}
	ap->a_vp = vp;
	return (VCALL(vp, VOFFSET(vop_close), ap));
}
834 
835 /*
836  * Check access permission on the union vnode.
837  * The access check being enforced is to check
838  * against both the underlying vnode, and any
839  * copied vnode.  This ensures that no additional
840  * file permissions are given away simply because
841  * the user caused an implicit file copy.
842  */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;	/* returned if neither layer exists */
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	/*
	 * If an upper layer exists, its access check is authoritative.
	 */
	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			/*
			 * For a "below" mount, also re-check access with
			 * the credentials the union was mounted with.
			 */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp, 0, td);
	}
	return(error);
}
906 
907 /*
908  * We handle getattr only to change the fsid and
909  * track object sizes
910  *
911  * It's not clear whether VOP_GETATTR is to be
912  * called with the vnode locked or not.  stat() calls
913  * it with (vp) locked, and fstat() calls it with
914  * (vp) unlocked.
915  *
916  * Because of this we cannot use our normal locking functions
917  * if we do not intend to lock the main a_vp node.  At the moment
918  * we are running without any specific locking at all, but beware
919  * to any programmer that care must be taken if locking is added
920  * to this function.
921  */
922 
static int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct union_mount *um = MOUNTTOUNIONMOUNT(ap->a_vp->v_mount);
	struct vnode *vp;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	if ((vp = un->un_uppervp) != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	/*
	 * Decide whether the lower layer still needs to be queried:
	 * always if there was no upper layer; for the nlink fixup
	 * (into the scratch vattr) if the upper is a directory with
	 * a lower counterpart; otherwise not at all.
	 */
	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	/* Report the union mount's fsid rather than the upper layer's. */
	if (ap->a_vap->va_fsid == um->um_upperdev)
		ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/* Merge directory link counts from both layers (see above). */
	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
	return (0);
}
982 
static int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
		return (EROFS);
	}

	/*
	 * Handle case of truncating lower object to zero size
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		/* Only copy the contents up when not truncating to zero. */
		error = union_copyup(un, (ap->a_vap->va_size != 0),
			    ap->a_cred, ap->a_td);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	error = EROFS;
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
					ap->a_cred, ap->a_td);
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, td);
	}
	return (error);
}
1035 
static int
union_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error;

	/*
	 * Read from whichever layer backs this node (upper preferred).
	 */
	uvp = union_lock_other(un, td);
	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));

	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	union_unlock_other(uvp, td);

	/*
	 * XXX
	 * Perhaps the size of the underlying object has changed under
	 * our feet.  Take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (uvp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}
	return (error);
}
1076 
1077 static int
1078 union_write(ap)
1079 	struct vop_write_args /* {
1080 		struct vnode *a_vp;
1081 		struct uio *a_uio;
1082 		int  a_ioflag;
1083 		struct ucred *a_cred;
1084 	} */ *ap;
1085 {
1086 	struct union_node *un = VTOUNION(ap->a_vp);
1087 	struct thread *td = ap->a_uio->uio_td;
1088 	struct vnode *uppervp;
1089 	int error;
1090 
1091 	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
1092 		panic("union: missing upper layer in write");
1093 
1094 	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1095 
1096 	/*
1097 	 * The size of the underlying object may be changed by the
1098 	 * write.
1099 	 */
1100 	if (error == 0) {
1101 		off_t cur = ap->a_uio->uio_offset;
1102 
1103 		if (cur > un->un_uppersz)
1104 			union_newsize(ap->a_vp, cur, VNOVAL);
1105 	}
1106 	union_unlock_upper(uppervp, td);
1107 	return (error);
1108 }
1109 
1110 static int
1111 union_lease(ap)
1112 	struct vop_lease_args /* {
1113 		struct vnode *a_vp;
1114 		struct thread *a_td;
1115 		struct ucred *a_cred;
1116 		int a_flag;
1117 	} */ *ap;
1118 {
1119 	struct vnode *ovp = OTHERVP(ap->a_vp);
1120 
1121 	ap->a_vp = ovp;
1122 	return (VCALL(ovp, VOFFSET(vop_lease), ap));
1123 }
1124 
1125 static int
1126 union_ioctl(ap)
1127 	struct vop_ioctl_args /* {
1128 		struct vnode *a_vp;
1129 		u_long  a_command;
1130 		caddr_t  a_data;
1131 		int  a_fflag;
1132 		struct ucred *a_cred;
1133 		struct thread *a_td;
1134 	} */ *ap;
1135 {
1136 	struct vnode *ovp = OTHERVP(ap->a_vp);
1137 
1138 	ap->a_vp = ovp;
1139 	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1140 }
1141 
1142 static int
1143 union_poll(ap)
1144 	struct vop_poll_args /* {
1145 		struct vnode *a_vp;
1146 		int  a_events;
1147 		struct ucred *a_cred;
1148 		struct thread *a_td;
1149 	} */ *ap;
1150 {
1151 	struct vnode *ovp = OTHERVP(ap->a_vp);
1152 
1153 	ap->a_vp = ovp;
1154 	return (VCALL(ovp, VOFFSET(vop_poll), ap));
1155 }
1156 
1157 static int
1158 union_fsync(ap)
1159 	struct vop_fsync_args /* {
1160 		struct vnode *a_vp;
1161 		struct ucred *a_cred;
1162 		int  a_waitfor;
1163 		struct thread *a_td;
1164 	} */ *ap;
1165 {
1166 	int error = 0;
1167 	struct thread *td = ap->a_td;
1168 	struct vnode *targetvp;
1169 	struct union_node *un = VTOUNION(ap->a_vp);
1170 
1171 	if ((targetvp = union_lock_other(un, td)) != NULLVP) {
1172 		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, td);
1173 		union_unlock_other(targetvp, td);
1174 	}
1175 
1176 	return (error);
1177 }
1178 
1179 /*
1180  *	union_remove:
1181  *
1182  *	Remove the specified cnp.  The dvp and vp are passed to us locked
1183  *	and must remain locked on return.
1184  */
1185 
static int
union_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *uppervp;
	struct vnode *upperdvp;
	int error;

	/* Removal must happen in the upper layer's directory. */
	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union remove: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		/*
		 * The object exists in the upper layer: remove it there.
		 * If a lower copy shadows it, request a whiteout so the
		 * lower name does not reappear after the remove.
		 */
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_REMOVE(upperdvp, uppervp, cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		/*
		 * Object only exists in the lower layer: create a
		 * whiteout in the upper directory to mask it.
		 */
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    upperdvp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}
1223 
1224 /*
1225  *	union_link:
1226  *
1227  *	tdvp and vp will be locked on entry.
1228  *	tdvp and vp should remain locked on return.
1229  */
1230 
static int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		/*
		 * Source vnode is not a union vnode (cross-filesystem
		 * link attempt); pass it down unchanged.
		 */
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);

		/*
		 * Hard links must live in the upper layer; copy the
		 * source up first if it only exists in the lower layer.
		 */
		if (tun->un_uppervp == NULLVP) {
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					VOP_UNLOCK(dun->un_uppervp, 0, td);
				}
			}
#endif
			error = union_copyup(tun, 1, cnp->cn_cred, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					    LK_EXCLUSIVE | LK_RETRY, td);
				dun->un_flags |= UN_ULOCK;
			}
#endif
			if (error)
				return (error);
		}
		/* Link against the (now existing) upper vnode. */
		vp = tun->un_uppervp;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/*
	 * Make sure upper is locked, then unlock the union directory we were
	 * called with to avoid a deadlock while we are calling VOP_LINK() on
	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
	 * is expected to be locked on return.
	 */

	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
		return (EROFS);

	VOP_UNLOCK(ap->a_tdvp, 0, td);		/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */

	/*
	 * Unlock tun->un_uppervp if we locked it above.
	 */
	if (ap->a_tdvp->v_op == ap->a_vp->v_op)
		VOP_UNLOCK(vp, 0, td);
	/*
	 * We have to unlock tdvp prior to relocking our calling node in
	 * order to avoid a deadlock.  We also have to unlock ap->a_vp
	 * before relocking the directory, but then we have to relock
	 * ap->a_vp as our caller expects.
	 */
	VOP_UNLOCK(ap->a_vp, 0, td);
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, td);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}
1305 
/*
 * union_rename:
 *
 *	Translate each of the four vnodes (fdvp, fvp, tdvp, tvp) from the
 *	union layer to the corresponding upper-layer vnode, copying the
 *	source file up if necessary, then hand the whole operation to the
 *	upper filesystem's VOP_RENAME().  VOP_RENAME() consumes all its
 *	vnode references, so on success there is no cleanup here.
 */
static int
union_rename(ap)
	struct vop_rename_args  /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */

	if (fdvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */

	if (fvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				/*
				 * Lower-only regular file: copy it to the
				 * upper layer so the rename can proceed
				 * entirely within the upper filesystem.
				 */
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_thread);
				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_mkshadow(um, fdvp,
					    ap->a_fcnp, &un->un_uppervp);
				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_thread);
				if (un->un_uppervp)
					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		/*
		 * If a lower copy remains, whiteout the old name so it
		 * does not reappear after the rename.
		 */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;
		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transfered to what we will pass
	 * down in the VOP_RENAME() and we replace uppervp with a simple
	 * reference.
	 */

	if (tdvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * This should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * New tdvp is a lock and reference on uppervp.
		 * Put away the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist, put away the (wrong)
	 * file and change tvp to NULL.
	 */

	if (tvp != NULLVP && tvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME() releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */

bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}
1470 
1471 static int
1472 union_mkdir(ap)
1473 	struct vop_mkdir_args /* {
1474 		struct vnode *a_dvp;
1475 		struct vnode **a_vpp;
1476 		struct componentname *a_cnp;
1477 		struct vattr *a_vap;
1478 	} */ *ap;
1479 {
1480 	struct union_node *dun = VTOUNION(ap->a_dvp);
1481 	struct componentname *cnp = ap->a_cnp;
1482 	struct thread *td = cnp->cn_thread;
1483 	struct vnode *upperdvp;
1484 	int error = EROFS;
1485 
1486 	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
1487 		struct vnode *vp;
1488 
1489 		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
1490 		union_unlock_upper(upperdvp, td);
1491 
1492 		if (error == 0) {
1493 			VOP_UNLOCK(vp, 0, td);
1494 			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vrefcnt(vp)));
1495 			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
1496 				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
1497 			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
1498 		}
1499 	}
1500 	return (error);
1501 }
1502 
static int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	struct vnode *uppervp;
	int error;

	/* Removal must happen in the upper layer's directory. */
	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		/*
		 * The directory exists in the upper layer: remove it
		 * there, whiteouting the name if a lower copy would
		 * otherwise show through.
		 */
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		/*
		 * Directory only exists in the lower layer: mask it
		 * with a whiteout in the upper directory.  (Uses
		 * dun->un_uppervp, which is the same vnode as the
		 * locked upperdvp above.)
		 */
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    dun->un_uppervp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}
1540 
1541 /*
1542  *	union_symlink:
1543  *
1544  *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
1545  *	(unused).
1546  */
1547 
1548 static int
1549 union_symlink(ap)
1550 	struct vop_symlink_args /* {
1551 		struct vnode *a_dvp;
1552 		struct vnode **a_vpp;
1553 		struct componentname *a_cnp;
1554 		struct vattr *a_vap;
1555 		char *a_target;
1556 	} */ *ap;
1557 {
1558 	struct union_node *dun = VTOUNION(ap->a_dvp);
1559 	struct componentname *cnp = ap->a_cnp;
1560 	struct thread *td = cnp->cn_thread;
1561 	struct vnode *dvp;
1562 	int error = EROFS;
1563 
1564 	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
1565 		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1566 			    ap->a_target);
1567 		union_unlock_upper(dvp, td);
1568 	}
1569 	return (error);
1570 }
1571 
1572 /*
1573  * union_readdir ()works in concert with getdirentries() and
1574  * readdir(3) to provide a list of entries in the unioned
1575  * directories.  getdirentries()  is responsible for walking
1576  * down the union stack.  readdir(3) is responsible for
1577  * eliminating duplicate names from the returned data stream.
1578  */
1579 static int
1580 union_readdir(ap)
1581 	struct vop_readdir_args /* {
1582 		struct vnode *a_vp;
1583 		struct uio *a_uio;
1584 		struct ucred *a_cred;
1585 		int *a_eofflag;
1586 		u_long *a_cookies;
1587 		int a_ncookies;
1588 	} */ *ap;
1589 {
1590 	struct union_node *un = VTOUNION(ap->a_vp);
1591 	struct thread *td = ap->a_uio->uio_td;
1592 	struct vnode *uvp;
1593 	int error = 0;
1594 
1595 	if ((uvp = union_lock_upper(un, td)) != NULLVP) {
1596 		ap->a_vp = uvp;
1597 		error = VCALL(uvp, VOFFSET(vop_readdir), ap);
1598 		union_unlock_upper(uvp, td);
1599 	}
1600 	return(error);
1601 }
1602 
1603 static int
1604 union_readlink(ap)
1605 	struct vop_readlink_args /* {
1606 		struct vnode *a_vp;
1607 		struct uio *a_uio;
1608 		struct ucred *a_cred;
1609 	} */ *ap;
1610 {
1611 	int error;
1612 	struct union_node *un = VTOUNION(ap->a_vp);
1613 	struct uio *uio = ap->a_uio;
1614 	struct thread *td = uio->uio_td;
1615 	struct vnode *vp;
1616 
1617 	vp = union_lock_other(un, td);
1618 	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1619 
1620 	ap->a_vp = vp;
1621 	error = VCALL(vp, VOFFSET(vop_readlink), ap);
1622 	union_unlock_other(vp, td);
1623 
1624 	return (error);
1625 }
1626 
static int
union_getwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *uvp = UPPERVP(vp);

	/*
	 * Only the upper layer can be written, so delegate to it.
	 * With no upper vnode there is nowhere to write.
	 */
	if (uvp == NULL) {
		VI_LOCK(vp);
		/*
		 * NOTE(review): the VI_FREE check appears to distinguish
		 * a vnode on the free list (report EOPNOTSUPP) from a
		 * live read-only one (report EACCES) — confirm against
		 * callers of VOP_GETWRITEMOUNT.
		 */
		if (vp->v_iflag & VI_FREE) {
			VI_UNLOCK(vp);
			return (EOPNOTSUPP);
		}
		VI_UNLOCK(vp);
		return (EACCES);
	}
	return(VOP_GETWRITEMOUNT(uvp, ap->a_mpp));
}
1648 
1649 /*
1650  *	union_inactive:
1651  *
1652  *	Called with the vnode locked.  We are expected to unlock the vnode.
1653  */
1654 
static int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;	/* unused except by #if 0 code below */
	struct union_node *un = VTOUNION(vp);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 */

	/* Drop any cached directory vnode stack built for readdir. */
	if (un->un_dircache != NULL)
		union_dircache_free(un);

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, 0, td);
	}
#endif

	/* We were called with vp locked and must return it unlocked. */
	VOP_UNLOCK(vp, 0, td);

	/* If the node is not cached, force it to be reclaimed now. */
	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}
1691 
1692 static int
1693 union_reclaim(ap)
1694 	struct vop_reclaim_args /* {
1695 		struct vnode *a_vp;
1696 	} */ *ap;
1697 {
1698 	union_freevp(ap->a_vp);
1699 
1700 	return (0);
1701 }
1702 
1703 /*
1704  * unionvp do not hold a VM object and there is no need to create one for
1705  * upper or lower vp because it is done in the union_open()
1706  */
1707 static int
1708 union_createvobject(ap)
1709 	struct vop_createvobject_args /* {
1710 		struct vnode *vp;
1711 		struct ucred *cred;
1712 		struct thread *td;
1713 	} */ *ap;
1714 {
1715 	struct vnode *vp = ap->a_vp;
1716 
1717 	vp->v_vflag |= VV_OBJBUF;
1718 	return (0);
1719 }
1720 
1721 /*
1722  * We have nothing to destroy and this operation shouldn't be bypassed.
1723  */
1724 static int
1725 union_destroyvobject(ap)
1726 	struct vop_destroyvobject_args /* {
1727 		struct vnode *vp;
1728 	} */ *ap;
1729 {
1730 	struct vnode *vp = ap->a_vp;
1731 
1732 	vp->v_vflag &= ~VV_OBJBUF;
1733 	return (0);
1734 }
1735 
1736 /*
1737  * Get VM object from the upper or lower vp
1738  */
1739 static int
1740 union_getvobject(ap)
1741 	struct vop_getvobject_args /* {
1742 		struct vnode *vp;
1743 		struct vm_object **objpp;
1744 	} */ *ap;
1745 {
1746 	struct vnode *ovp = OTHERVP(ap->a_vp);
1747 
1748 	if (ovp == NULL)
1749 		return EINVAL;
1750 	return (VOP_GETVOBJECT(ovp, ap->a_objpp));
1751 }
1752 
1753 static int
1754 union_print(ap)
1755 	struct vop_print_args /* {
1756 		struct vnode *a_vp;
1757 	} */ *ap;
1758 {
1759 	struct vnode *vp = ap->a_vp;
1760 
1761 	printf("\tvp=%p, uppervp=%p, lowervp=%p\n",
1762 	       vp, UPPERVP(vp), LOWERVP(vp));
1763 	if (UPPERVP(vp) != NULLVP)
1764 		vprint("union: upper", UPPERVP(vp));
1765 	if (LOWERVP(vp) != NULLVP)
1766 		vprint("union: lower", LOWERVP(vp));
1767 
1768 	return (0);
1769 }
1770 
1771 static int
1772 union_pathconf(ap)
1773 	struct vop_pathconf_args /* {
1774 		struct vnode *a_vp;
1775 		int a_name;
1776 		int *a_retval;
1777 	} */ *ap;
1778 {
1779 	int error;
1780 	struct thread *td = curthread;		/* XXX */
1781 	struct union_node *un = VTOUNION(ap->a_vp);
1782 	struct vnode *vp;
1783 
1784 	vp = union_lock_other(un, td);
1785 	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1786 
1787 	ap->a_vp = vp;
1788 	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1789 	union_unlock_other(vp, td);
1790 
1791 	return (error);
1792 }
1793 
1794 static int
1795 union_advlock(ap)
1796 	struct vop_advlock_args /* {
1797 		struct vnode *a_vp;
1798 		caddr_t  a_id;
1799 		int  a_op;
1800 		struct flock *a_fl;
1801 		int  a_flags;
1802 	} */ *ap;
1803 {
1804 	register struct vnode *ovp = OTHERVP(ap->a_vp);
1805 
1806 	ap->a_vp = ovp;
1807 	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1808 }
1809 
1810 
1811 /*
1812  * XXX - vop_strategy must be hand coded because it has no
1813  * YYY - and it is not coherent with anything
1814  *
1815  * vnode in its arguments.
1816  * This goes away with a merged VM/buffer cache.
1817  */
1818 static int
1819 union_strategy(ap)
1820 	struct vop_strategy_args /* {
1821 		struct vnode *a_vp;
1822 		struct buf *a_bp;
1823 	} */ *ap;
1824 {
1825 	struct buf *bp = ap->a_bp;
1826 	struct vnode *othervp = OTHERVP(ap->a_vp);
1827 
1828 #ifdef DIAGNOSTIC
1829 	if (othervp == NULLVP)
1830 		panic("union_strategy: nil vp");
1831 	if ((bp->b_iocmd == BIO_WRITE) &&
1832 	    (othervp == LOWERVP(ap->a_vp)))
1833 		panic("union_strategy: writing to lowervp");
1834 #endif
1835 	return (VOP_STRATEGY(othervp, bp));
1836 }
1837 
1838 static int
1839 union_getacl(ap)
1840 	struct vop_getacl_args /* {
1841 		struct vnode *a_vp;
1842 		acl_type_t a_type;
1843 		struct acl *a_aclp;
1844 		struct ucred *a_cred;
1845 		struct thread *a_td;
1846 	} */ *ap;
1847 {
1848 	int error;
1849 	struct union_node *un = VTOUNION(ap->a_vp);
1850 	struct vnode *vp;
1851 
1852 	vp = union_lock_other(un, ap->a_td);
1853 	ap->a_vp = vp;
1854 	error = VCALL(vp, VOFFSET(vop_getacl), ap);
1855 	union_unlock_other(vp, ap->a_td);
1856 
1857 	return (error);
1858 }
1859 
1860 static int
1861 union_setacl(ap)
1862 	struct vop_setacl_args /* {
1863 		struct vnode *a_vp;
1864 		acl_type_t a_type;
1865 		struct acl *a_aclp;
1866 		struct ucred *a_cred;
1867 		struct thread *a_td;
1868 	} */ *ap;
1869 {
1870 	int error;
1871 	struct union_node *un = VTOUNION(ap->a_vp);
1872 	struct vnode *vp;
1873 
1874 	vp = union_lock_other(un, ap->a_td);
1875 	ap->a_vp = vp;
1876 	error = VCALL(vp, VOFFSET(vop_setacl), ap);
1877 	union_unlock_other(vp, ap->a_td);
1878 
1879 	return (error);
1880 }
1881 
1882 static int
1883 union_aclcheck(ap)
1884 	struct vop_aclcheck_args /* {
1885 		struct vnode *a_vp;
1886 		acl_type_t a_type;
1887 		struct acl *a_aclp;
1888 		struct ucred *a_cred;
1889 		struct thread *a_td;
1890 	} */ *ap;
1891 {
1892 	struct vnode *ovp = OTHERVP(ap->a_vp);
1893 
1894 	ap->a_vp = ovp;
1895 	return (VCALL(ovp, VOFFSET(vop_aclcheck), ap));
1896 }
1897 
1898 static int
1899 union_closeextattr(ap)
1900 	struct vop_closeextattr_args /* {
1901 		struct vnode *a_vp;
1902 		int a_commit;
1903 		struct ucred *a_cred;
1904 		struct thread *a_td;
1905 	} */ *ap;
1906 {
1907 	int error;
1908 	struct union_node *un = VTOUNION(ap->a_vp);
1909 	struct vnode *vp;
1910 
1911 	vp = union_lock_other(un, ap->a_td);
1912 	ap->a_vp = vp;
1913 	error = VCALL(vp, VOFFSET(vop_closeextattr), ap);
1914 	union_unlock_other(vp, ap->a_td);
1915 
1916 	return (error);
1917 }
1918 
1919 static int
1920 union_getextattr(ap)
1921 	struct vop_getextattr_args /* {
1922 		struct vnode *a_vp;
1923 		int a_attrnamespace;
1924 		const char *a_name;
1925 		struct uio *a_uio;
1926 		size_t *a_size;
1927 		struct ucred *a_cred;
1928 		struct thread *a_td;
1929 	} */ *ap;
1930 {
1931 	int error;
1932 	struct union_node *un = VTOUNION(ap->a_vp);
1933 	struct vnode *vp;
1934 
1935 	vp = union_lock_other(un, ap->a_td);
1936 	ap->a_vp = vp;
1937 	error = VCALL(vp, VOFFSET(vop_getextattr), ap);
1938 	union_unlock_other(vp, ap->a_td);
1939 
1940 	return (error);
1941 }
1942 
1943 static int
1944 union_listextattr(ap)
1945 	struct vop_listextattr_args /* {
1946 		struct vnode *a_vp;
1947 		int a_attrnamespace;
1948 		struct uio *a_uio;
1949 		size_t *a_size;
1950 		struct ucred *a_cred;
1951 		struct thread *a_td;
1952 	} */ *ap;
1953 {
1954 	int error;
1955 	struct union_node *un = VTOUNION(ap->a_vp);
1956 	struct vnode *vp;
1957 
1958 	vp = union_lock_other(un, ap->a_td);
1959 	ap->a_vp = vp;
1960 	error = VCALL(vp, VOFFSET(vop_listextattr), ap);
1961 	union_unlock_other(vp, ap->a_td);
1962 
1963 	return (error);
1964 }
1965 
1966 static int
1967 union_openextattr(ap)
1968 	struct vop_openextattr_args /* {
1969 		struct vnode *a_vp;
1970 		struct ucred *a_cred;
1971 		struct thread *a_td;
1972 	} */ *ap;
1973 {
1974 	int error;
1975 	struct union_node *un = VTOUNION(ap->a_vp);
1976 	struct vnode *vp;
1977 
1978 	vp = union_lock_other(un, ap->a_td);
1979 	ap->a_vp = vp;
1980 	error = VCALL(vp, VOFFSET(vop_openextattr), ap);
1981 	union_unlock_other(vp, ap->a_td);
1982 
1983 	return (error);
1984 }
1985 
1986 static int
1987 union_deleteextattr(ap)
1988 	struct vop_deleteextattr_args /* {
1989 		struct vnode *a_vp;
1990 		int a_attrnamespace;
1991 		const char *a_name;
1992 		struct ucred *a_cred;
1993 		struct thread *a_td;
1994 	} */ *ap;
1995 {
1996 	int error;
1997 	struct union_node *un = VTOUNION(ap->a_vp);
1998 	struct vnode *vp;
1999 
2000 	vp = union_lock_other(un, ap->a_td);
2001 	ap->a_vp = vp;
2002 	error = VCALL(vp, VOFFSET(vop_deleteextattr), ap);
2003 	union_unlock_other(vp, ap->a_td);
2004 
2005 	return (error);
2006 }
2007 
2008 static int
2009 union_setextattr(ap)
2010 	struct vop_setextattr_args /* {
2011 		struct vnode *a_vp;
2012 		int a_attrnamespace;
2013 		const char *a_name;
2014 		struct uio *a_uio;
2015 		struct ucred *a_cred;
2016 		struct thread *a_td;
2017 	} */ *ap;
2018 {
2019 	int error;
2020 	struct union_node *un = VTOUNION(ap->a_vp);
2021 	struct vnode *vp;
2022 
2023 	vp = union_lock_other(un, ap->a_td);
2024 	ap->a_vp = vp;
2025 	error = VCALL(vp, VOFFSET(vop_setextattr), ap);
2026 	union_unlock_other(vp, ap->a_td);
2027 
2028 	return (error);
2029 }
2030 
2031 static int
2032 union_setlabel(ap)
2033 	struct vop_setlabel_args /* {
2034 		struct vnode *a_vp;
2035 		struct label *a_label;
2036 		struct ucred *a_cred;
2037 		struct thread *a_td;
2038 	} */ *ap;
2039 {
2040 	int error;
2041 	struct union_node *un = VTOUNION(ap->a_vp);
2042 	struct vnode *vp;
2043 
2044 	vp = union_lock_other(un, ap->a_td);
2045 	ap->a_vp = vp;
2046 	error = VCALL(vp, VOFFSET(vop_setlabel), ap);
2047 	union_unlock_other(vp, ap->a_td);
2048 
2049 	return (error);
2050 }
2051 
2052 /*
2053  * Global vfs data structures
2054  */
/*
 * Vnode operations vector for union vnodes.  Unlisted operations
 * fall through to default_vnodeops.
 */
struct vop_vector union_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		union_access,
	.vop_aclcheck =		union_aclcheck,
	.vop_advlock =		union_advlock,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_close =		union_close,
	.vop_closeextattr =	union_closeextattr,
	.vop_create =		union_create,
	.vop_createvobject =	union_createvobject,
	.vop_deleteextattr =	union_deleteextattr,
	.vop_destroyvobject =	union_destroyvobject,
	.vop_fsync =		union_fsync,
	.vop_getattr =		union_getattr,
	.vop_getacl =		union_getacl,
	.vop_getextattr =		union_getextattr,
	.vop_getvobject =		union_getvobject,
	.vop_inactive =		union_inactive,
	.vop_ioctl =		union_ioctl,
	.vop_lease =		union_lease,
	.vop_link =		union_link,
	.vop_listextattr =	union_listextattr,
	.vop_lookup =		union_lookup,
	.vop_mkdir =		union_mkdir,
	.vop_mknod =		union_mknod,
	.vop_open =		union_open,
	.vop_openextattr =	union_openextattr,
	.vop_pathconf =		union_pathconf,
	.vop_poll =		union_poll,
	.vop_print =		union_print,
	.vop_read =		union_read,
	.vop_readdir =		union_readdir,
	.vop_readlink =		union_readlink,
	.vop_getwritemount =	union_getwritemount,
	.vop_reclaim =		union_reclaim,
	.vop_remove =		union_remove,
	.vop_rename =		union_rename,
	.vop_rmdir =		union_rmdir,
	.vop_setacl =		union_setacl,
	.vop_setattr =		union_setattr,
	.vop_setextattr =		union_setextattr,
	.vop_setlabel =		union_setlabel,
	.vop_strategy =		union_strategy,
	.vop_symlink =		union_symlink,
	.vop_whiteout =		union_whiteout,
	.vop_write =		union_write,
};
2102