xref: /freebsd/sys/fs/unionfs/union_subr.c (revision 7259ca31048e5ced8e7f90657a3d7084aeafdf51)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1994 Jan-Simon Pendry
5  * Copyright (c) 1994
6  *	The Regents of the University of California.  All rights reserved.
7  * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
8  * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
9  *
10  * This code is derived from software contributed to Berkeley by
11  * Jan-Simon Pendry.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
38  * $FreeBSD$
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/namei.h>
50 #include <sys/proc.h>
51 #include <sys/vnode.h>
52 #include <sys/dirent.h>
53 #include <sys/fcntl.h>
54 #include <sys/filedesc.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/taskqueue.h>
58 #include <sys/resourcevar.h>
59 
60 #include <security/mac/mac_framework.h>
61 
62 #include <vm/uma.h>
63 
64 #include <fs/unionfs/union.h>
65 
/* Number of buckets in each unionfs directory node's child-name hash. */
#define NUNIONFSNODECACHE 16

static MALLOC_DEFINE(M_UNIONFSHASH, "UNIONFS hash", "UNIONFS hash table");
MALLOC_DEFINE(M_UNIONFSNODE, "UNIONFS node", "UNIONFS vnode private part");
MALLOC_DEFINE(M_UNIONFSPATH, "UNIONFS path", "UNIONFS path private part");

/*
 * Deferred-release machinery: unionfs_noderem() queues nodes here so that
 * the vrele() of the parent directory vnode (and the free of the node)
 * happens from a taskqueue thread rather than inline during reclamation.
 */
static struct task unionfs_deferred_rele_task;
static struct mtx unionfs_deferred_rele_lock;
static STAILQ_HEAD(, unionfs_node) unionfs_deferred_rele_list =
    STAILQ_HEAD_INITIALIZER(unionfs_deferred_rele_list);
static TASKQUEUE_DEFINE_THREAD(unionfs_rele);

/* Running count of deferred releases processed (read-only sysctl). */
unsigned int unionfs_ndeferred = 0;
SYSCTL_UINT(_vfs, OID_AUTO, unionfs_ndeferred, CTLFLAG_RD,
    &unionfs_ndeferred, 0, "unionfs deferred vnode release");

static void unionfs_deferred_rele(void *, int);
83 
84 /*
85  * Initialize
86  */
87 int
88 unionfs_init(struct vfsconf *vfsp)
89 {
90 	UNIONFSDEBUG("unionfs_init\n");	/* printed during system boot */
91 	TASK_INIT(&unionfs_deferred_rele_task, 0, unionfs_deferred_rele, NULL);
92 	mtx_init(&unionfs_deferred_rele_lock, "uniondefr", NULL, MTX_DEF);
93 	return (0);
94 }
95 
/*
 * Module teardown: drain and destroy the deferred-release infrastructure.
 */
int
unionfs_uninit(struct vfsconf *vfsp)
{
	/* Wait for any queued deferred releases to finish before freeing. */
	taskqueue_quiesce(taskqueue_unionfs_rele);
	taskqueue_free(taskqueue_unionfs_rele);
	mtx_destroy(&unionfs_deferred_rele_lock);
	return (0);
}
107 
108 static void
109 unionfs_deferred_rele(void *arg __unused, int pending __unused)
110 {
111 	STAILQ_HEAD(, unionfs_node) local_rele_list;
112 	struct unionfs_node *unp, *tunp;
113 	unsigned int ndeferred;
114 
115 	ndeferred = 0;
116 	STAILQ_INIT(&local_rele_list);
117 	mtx_lock(&unionfs_deferred_rele_lock);
118 	STAILQ_CONCAT(&local_rele_list, &unionfs_deferred_rele_list);
119 	mtx_unlock(&unionfs_deferred_rele_lock);
120 	STAILQ_FOREACH_SAFE(unp, &local_rele_list, un_rele, tunp) {
121 		++ndeferred;
122 		MPASS(unp->un_dvp != NULL);
123 		vrele(unp->un_dvp);
124 		free(unp, M_UNIONFSNODE);
125 	}
126 
127 	/* We expect this function to be single-threaded, thus no atomic */
128 	unionfs_ndeferred += ndeferred;
129 }
130 
131 static struct unionfs_node_hashhead *
132 unionfs_get_hashhead(struct vnode *dvp, char *path)
133 {
134 	struct unionfs_node *unp;
135 	int		count;
136 	char		hash;
137 
138 	hash = 0;
139 	unp = VTOUNIONFS(dvp);
140 	if (path != NULL) {
141 		for (count = 0; path[count]; count++)
142 			hash += path[count];
143 	}
144 
145 	return (&(unp->un_hashtbl[hash & (unp->un_hashmask)]));
146 }
147 
/*
 * Get the cached vnode.
 *
 * Look up a cached unionfs directory vnode named 'path' under parent dvp.
 * Returns the vnode unlocked and unreferenced (the caller vref()s it), or
 * NULLVP if the name is not cached or the cached vnode is being reclaimed.
 * Only directories are ever cached.
 */
static struct vnode *
unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
    struct vnode *dvp, char *path)
{
	struct unionfs_node_hashhead *hd;
	struct unionfs_node *unp;
	struct vnode *vp;

	KASSERT((uvp == NULLVP || uvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));
	KASSERT((lvp == NULLVP || lvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));

	/* The parent's interlock protects its hash chains. */
	VI_LOCK(dvp);
	hd = unionfs_get_hashhead(dvp, path);
	LIST_FOREACH(unp, hd, un_hash) {
		if (!strcmp(unp->un_path, path)) {
			vp = UNIONFSTOV(unp);
			/* MTX_DUPOK: vp's interlock nests inside dvp's. */
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			VI_UNLOCK(dvp);
			vp->v_iflag &= ~VI_OWEINACT;
			/* Refuse vnodes that are dying or in inactivation. */
			if (VN_IS_DOOMED(vp) ||
			    ((vp->v_iflag & VI_DOINGINACT) != 0)) {
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else
				VI_UNLOCK(vp);
			return (vp);
		}
	}
	VI_UNLOCK(dvp);

	return (NULLVP);
}
185 
/*
 * Add the new vnode into cache.
 *
 * Insert uncp into its parent's name hash unless an entry with the same
 * name is already cached and still alive, in which case that vnode is
 * returned instead (unlocked, unreferenced) and the caller should discard
 * uncp.  Returns NULLVP when uncp was inserted.
 */
static struct vnode *
unionfs_ins_cached_vnode(struct unionfs_node *uncp,
    struct vnode *dvp, char *path)
{
	struct unionfs_node_hashhead *hd;
	struct unionfs_node *unp;
	struct vnode *vp;

	KASSERT((uncp->un_uppervp==NULLVP || uncp->un_uppervp->v_type==VDIR),
	    ("unionfs_ins_cached_vnode: v_type != VDIR"));
	KASSERT((uncp->un_lowervp==NULLVP || uncp->un_lowervp->v_type==VDIR),
	    ("unionfs_ins_cached_vnode: v_type != VDIR"));

	VI_LOCK(dvp);
	hd = unionfs_get_hashhead(dvp, path);
	LIST_FOREACH(unp, hd, un_hash) {
		if (!strcmp(unp->un_path, path)) {
			vp = UNIONFSTOV(unp);
			/* MTX_DUPOK: vp's interlock nests inside dvp's. */
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			vp->v_iflag &= ~VI_OWEINACT;
			if (VN_IS_DOOMED(vp) ||
			    ((vp->v_iflag & VI_DOINGINACT) != 0)) {
				/*
				 * The existing entry is dying: insert uncp
				 * anyway.  NOTE(review): the stale entry
				 * remains on the chain until it is
				 * reclaimed via unionfs_rem_cached_vnode().
				 */
				LIST_INSERT_HEAD(hd, uncp, un_hash);
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else
				VI_UNLOCK(vp);
			VI_UNLOCK(dvp);
			return (vp);
		}
	}

	LIST_INSERT_HEAD(hd, uncp, un_hash);
	VI_UNLOCK(dvp);

	return (NULLVP);
}
226 
/*
 * Remove the vnode.
 *
 * Unlink unp from its parent's name hash under the parent's interlock and
 * clear the linkage, so that un_hash.le_prev == NULL can later be used to
 * test cache membership (see unionfs_noderem()).
 */
static void
unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
{
	KASSERT((unp != NULL), ("unionfs_rem_cached_vnode: null node"));
	KASSERT((dvp != NULLVP),
	    ("unionfs_rem_cached_vnode: null parent vnode"));
	KASSERT((unp->un_hash.le_prev != NULL),
	    ("unionfs_rem_cached_vnode: null hash"));

	VI_LOCK(dvp);
	LIST_REMOVE(unp, un_hash);
	unp->un_hash.le_next = NULL;
	unp->un_hash.le_prev = NULL;
	VI_UNLOCK(dvp);
}
245 
/*
 * Common cleanup handling for unionfs_nodeget
 * Upper, lower, and parent directory vnodes are expected to be referenced by
 * the caller.  Upper and lower vnodes, if non-NULL, are also expected to be
 * exclusively locked by the caller.
 * This function will return with the caller's locks and references undone.
 */
static void
unionfs_nodeget_cleanup(struct vnode *vp, void *arg)
{
	struct unionfs_node *unp;

	/*
	 * Lock and reset the default vnode lock; vgone() expects a locked
	 * vnode, and we're going to reset the vnode ops.
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);

	/*
	 * Clear out private data and reset the vnode ops to avoid use of
	 * unionfs vnode ops on a partially constructed vnode.
	 */
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	vp->v_op = &dead_vnodeops;
	VI_UNLOCK(vp);
	vgone(vp);
	vput(vp);

	/* Undo the caller's references/locks on the constituent vnodes. */
	unp = arg;
	if (unp->un_dvp != NULLVP)
		vrele(unp->un_dvp);
	if (unp->un_uppervp != NULLVP)
		vput(unp->un_uppervp);
	if (unp->un_lowervp != NULLVP)
		vput(unp->un_lowervp);
	if (unp->un_hashtbl != NULL)
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	/* un_path may still be NULL here; free(NULL) is a no-op. */
	free(unp->un_path, M_UNIONFSPATH);
	free(unp, M_UNIONFSNODE);
}
288 
/*
 * Make a new unionfs node, or return an existing cached one.
 *
 * uppervp and lowervp must be unlocked on entry: locking the new unionfs
 * vnode also locks uppervp or lowervp (they share a lock), so holding
 * several of them simultaneously risks deadlock.
 *
 * On success *vpp holds a referenced unionfs vnode, locked iff cnp's
 * cn_lkflags requested a lock type.
 */
int
unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
    struct vnode *lowervp, struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct thread *td)
{
	char	       *path;
	struct unionfs_mount *ump;
	struct unionfs_node *unp;
	struct vnode   *vp;
	int		error;
	int		lkflags;
	enum vtype	vt;

	error = 0;
	ump = MOUNTTOUNIONFSMOUNT(mp);
	lkflags = (cnp ? cnp->cn_lkflags : 0);
	path = (cnp ? cnp->cn_nameptr : NULL);
	*vpp = NULLVP;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("unionfs_nodeget: upper and lower is null");

	/* The upper layer, when present, determines the vnode type. */
	vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);

	/* If it has no ISLASTCN flag, path check is skipped. */
	if (cnp && !(cnp->cn_flags & ISLASTCN))
		path = NULL;

	/* check the cache: only named directories are cached */
	if (path != NULL && dvp != NULLVP && vt == VDIR) {
		vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp, path);
		if (vp != NULLVP) {
			vref(vp);
			*vpp = vp;
			goto unionfs_nodeget_out;
		}
	}

	if ((uppervp == NULLVP || ump->um_uppervp != uppervp) ||
	    (lowervp == NULLVP || ump->um_lowervp != lowervp)) {
		/* dvp will be NULLVP only in case of root vnode. */
		if (dvp == NULLVP)
			return (EINVAL);
	}
	unp = malloc(sizeof(struct unionfs_node),
	    M_UNIONFSNODE, M_WAITOK | M_ZERO);

	error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
	if (error != 0) {
		free(unp, M_UNIONFSNODE);
		return (error);
	}
	/* Take our own references; dropped via unionfs_noderem/cleanup. */
	if (dvp != NULLVP)
		vref(dvp);
	if (uppervp != NULLVP)
		vref(uppervp);
	if (lowervp != NULLVP)
		vref(lowervp);

	/* Directories get a hash table for caching their children. */
	if (vt == VDIR)
		unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
		    &(unp->un_hashmask));

	unp->un_vnode = vp;
	unp->un_uppervp = uppervp;
	unp->un_lowervp = lowervp;
	unp->un_dvp = dvp;
	/* Share the lock of the preferred (upper if present) layer vnode. */
	if (uppervp != NULLVP)
		vp->v_vnlock = uppervp->v_vnlock;
	else
		vp->v_vnlock = lowervp->v_vnlock;

	if (path != NULL) {
		/* Keep a NUL-terminated private copy of the name. */
		unp->un_path = malloc(cnp->cn_namelen + 1,
		    M_UNIONFSPATH, M_WAITOK | M_ZERO);
		bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
		unp->un_path[cnp->cn_namelen] = '\0';
		unp->un_pathlen = cnp->cn_namelen;
	}
	vp->v_type = vt;
	vp->v_data = unp;

	/* The root vnode pairs the mount's own upper and lower roots. */
	if ((uppervp != NULLVP && ump->um_uppervp == uppervp) &&
	    (lowervp != NULLVP && ump->um_lowervp == lowervp))
		vp->v_vflag |= VV_ROOT;

	vn_lock_pair(lowervp, false, uppervp, false);
	/* On failure insmntque1() invokes unionfs_nodeget_cleanup() itself. */
	error = insmntque1(vp, mp, unionfs_nodeget_cleanup, unp);
	if (error != 0)
		return (error);
	/* Drop layer vnodes that were reclaimed while we were unlocked. */
	if (lowervp != NULL && VN_IS_DOOMED(lowervp)) {
		vput(lowervp);
		unp->un_lowervp = NULL;
	}
	if (uppervp != NULL && VN_IS_DOOMED(uppervp)) {
		vput(uppervp);
		unp->un_uppervp = NULL;
	}
	if (unp->un_lowervp == NULL && unp->un_uppervp == NULL) {
		unionfs_nodeget_cleanup(vp, unp);
		return (ENOENT);
	}
	if (path != NULL && dvp != NULLVP && vt == VDIR)
		*vpp = unionfs_ins_cached_vnode(unp, dvp, path);
	if (*vpp != NULLVP) {
		/* Lost an insertion race: discard ours, use the cached one. */
		unionfs_nodeget_cleanup(vp, unp);
		vp = *vpp;
		vref(vp);
	} else {
		if (uppervp != NULL)
			VOP_UNLOCK(uppervp);
		if (lowervp != NULL)
			VOP_UNLOCK(lowervp);
		*vpp = vp;
	}

unionfs_nodeget_out:
	if (lkflags & LK_TYPE_MASK)
		vn_lock(vp, lkflags | LK_RETRY);

	return (0);
}
418 
/*
 * Clean up the unionfs node.
 *
 * Called with the unionfs vnode (and thus the shared layer lock)
 * exclusively locked.  Detaches the node from both layers, drops all
 * references, and defers the parent vrele() and node free to a taskqueue.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;
	int		count;

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;
	/* Switch back from the shared layer lock to our private lock. */
	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	vp->v_object = NULL;
	if (vp->v_writecount > 0) {
		/* Return any outstanding write counts to the layer vnode. */
		if (uvp != NULL)
			VOP_ADD_WRITECOUNT(uvp, -vp->v_writecount);
		else if (lvp != NULL)
			VOP_ADD_WRITECOUNT(lvp, -vp->v_writecount);
	} else if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);

	if (lvp != NULLVP)
		VOP_UNLOCK(lvp);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp);

	/* le_prev != NULL means the node is still in its parent's cache. */
	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
		unionfs_rem_cached_vnode(unp, dvp);

	/*
	 * NOTE(review): lockmgr() is handed VI_MTX(vp) although the
	 * interlock was dropped above — confirm the interlock convention
	 * expected by this lockmgr() invocation.
	 */
	if (lockmgr(vp->v_vnlock, LK_EXCLUSIVE, VI_MTX(vp)) != 0)
		panic("the lock for deletion is unacquirable.");

	if (lvp != NULLVP)
		vrele(lvp);
	if (uvp != NULLVP)
		vrele(uvp);
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
		unp->un_pathlen = 0;
	}

	if (unp->un_hashtbl != NULL) {
		/* Unhook any children still hashed under this directory. */
		for (count = 0; count <= unp->un_hashmask; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	}

	/* Free all per-process status records. */
	LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
		LIST_REMOVE(unsp, uns_list);
		free(unsp, M_TEMP);
	}
	if (dvp != NULLVP) {
		/*
		 * Defer the parent vrele() to the taskqueue; the node
		 * itself is freed by unionfs_deferred_rele().
		 */
		mtx_lock(&unionfs_deferred_rele_lock);
		STAILQ_INSERT_TAIL(&unionfs_deferred_rele_list, unp, un_rele);
		mtx_unlock(&unionfs_deferred_rele_lock);
		taskqueue_enqueue(taskqueue_unionfs_rele,
		    &unionfs_deferred_rele_task);
	} else
		free(unp, M_UNIONFSNODE);
}
501 
502 /*
503  * Get the unionfs node status.
504  * You need exclusive lock this vnode.
505  */
506 void
507 unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
508     struct unionfs_node_status **unspp)
509 {
510 	struct unionfs_node_status *unsp;
511 	pid_t pid;
512 
513 	pid = td->td_proc->p_pid;
514 
515 	KASSERT(NULL != unspp, ("null pointer"));
516 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_get_node_status");
517 
518 	LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
519 		if (unsp->uns_pid == pid) {
520 			*unspp = unsp;
521 			return;
522 		}
523 	}
524 
525 	/* create a new unionfs node status */
526 	unsp = malloc(sizeof(struct unionfs_node_status),
527 	    M_TEMP, M_WAITOK | M_ZERO);
528 
529 	unsp->uns_pid = pid;
530 	LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);
531 
532 	*unspp = unsp;
533 }
534 
535 /*
536  * Remove the unionfs node status, if you can.
537  * You need exclusive lock this vnode.
538  */
539 void
540 unionfs_tryrem_node_status(struct unionfs_node *unp,
541     struct unionfs_node_status *unsp)
542 {
543 	KASSERT(NULL != unsp, ("null pointer"));
544 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_get_node_status");
545 
546 	if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
547 		return;
548 
549 	LIST_REMOVE(unsp, uns_list);
550 	free(unsp, M_TEMP);
551 }
552 
553 /*
554  * Create upper node attr.
555  */
556 void
557 unionfs_create_uppervattr_core(struct unionfs_mount *ump, struct vattr *lva,
558     struct vattr *uva, struct thread *td)
559 {
560 	VATTR_NULL(uva);
561 	uva->va_type = lva->va_type;
562 	uva->va_atime = lva->va_atime;
563 	uva->va_mtime = lva->va_mtime;
564 	uva->va_ctime = lva->va_ctime;
565 
566 	switch (ump->um_copymode) {
567 	case UNIONFS_TRANSPARENT:
568 		uva->va_mode = lva->va_mode;
569 		uva->va_uid = lva->va_uid;
570 		uva->va_gid = lva->va_gid;
571 		break;
572 	case UNIONFS_MASQUERADE:
573 		if (ump->um_uid == lva->va_uid) {
574 			uva->va_mode = lva->va_mode & 077077;
575 			uva->va_mode |= (lva->va_type == VDIR ?
576 			    ump->um_udir : ump->um_ufile) & 0700;
577 			uva->va_uid = lva->va_uid;
578 			uva->va_gid = lva->va_gid;
579 		} else {
580 			uva->va_mode = (lva->va_type == VDIR ?
581 			    ump->um_udir : ump->um_ufile);
582 			uva->va_uid = ump->um_uid;
583 			uva->va_gid = ump->um_gid;
584 		}
585 		break;
586 	default:		/* UNIONFS_TRADITIONAL */
587 		uva->va_mode = 0777 & ~td->td_proc->p_pd->pd_cmask;
588 		uva->va_uid = ump->um_uid;
589 		uva->va_gid = ump->um_gid;
590 		break;
591 	}
592 }
593 
594 /*
595  * Create upper node attr.
596  */
597 int
598 unionfs_create_uppervattr(struct unionfs_mount *ump, struct vnode *lvp,
599     struct vattr *uva, struct ucred *cred, struct thread *td)
600 {
601 	struct vattr	lva;
602 	int		error;
603 
604 	if ((error = VOP_GETATTR(lvp, &lva, cred)))
605 		return (error);
606 
607 	unionfs_create_uppervattr_core(ump, &lva, uva, td);
608 
609 	return (error);
610 }
611 
612 /*
613  * relookup
614  *
615  * dvp should be locked on entry and will be locked on return.
616  *
617  * If an error is returned, *vpp will be invalid, otherwise it will hold a
618  * locked, referenced vnode. If *vpp == dvp then remember that only one
619  * LK_EXCLUSIVE lock is held.
620  */
621 int
622 unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
623     struct componentname *cnp, struct componentname *cn, struct thread *td,
624     char *path, int pathlen, u_long nameiop)
625 {
626 	int error;
627 
628 	cn->cn_namelen = pathlen;
629 	cn->cn_pnbuf = path;
630 	cn->cn_nameiop = nameiop;
631 	cn->cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
632 	cn->cn_lkflags = LK_EXCLUSIVE;
633 	cn->cn_cred = cnp->cn_cred;
634 	cn->cn_nameptr = cn->cn_pnbuf;
635 
636 	if (nameiop == DELETE)
637 		cn->cn_flags |= (cnp->cn_flags & (DOWHITEOUT | SAVESTART));
638 	else if (RENAME == nameiop)
639 		cn->cn_flags |= (cnp->cn_flags & SAVESTART);
640 	else if (nameiop == CREATE)
641 		cn->cn_flags |= NOCACHE;
642 
643 	vref(dvp);
644 	VOP_UNLOCK(dvp);
645 
646 	if ((error = relookup(dvp, vpp, cn))) {
647 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
648 	} else
649 		vrele(dvp);
650 
651 	KASSERT((cn->cn_flags & HASBUF) != 0,
652 	    ("%s: HASBUF cleared", __func__));
653 	KASSERT((cn->cn_flags & SAVENAME) != 0,
654 	    ("%s: SAVENAME cleared", __func__));
655 	KASSERT(cn->cn_pnbuf == path, ("%s: cn_pnbuf changed", __func__));
656 
657 	return (error);
658 }
659 
660 /*
661  * relookup for CREATE namei operation.
662  *
663  * dvp is unionfs vnode. dvp should be locked.
664  *
665  * If it called 'unionfs_copyfile' function by unionfs_link etc,
666  * VOP_LOOKUP information is broken.
667  * So it need relookup in order to create link etc.
668  */
669 int
670 unionfs_relookup_for_create(struct vnode *dvp, struct componentname *cnp,
671     struct thread *td)
672 {
673 	struct vnode *udvp;
674 	struct vnode *vp;
675 	struct componentname cn;
676 	int error;
677 
678 	udvp = UNIONFSVPTOUPPERVP(dvp);
679 	vp = NULLVP;
680 
681 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
682 	    cnp->cn_namelen, CREATE);
683 	if (error)
684 		return (error);
685 
686 	if (vp != NULLVP) {
687 		if (udvp == vp)
688 			vrele(vp);
689 		else
690 			vput(vp);
691 
692 		error = EEXIST;
693 	}
694 
695 	return (error);
696 }
697 
698 /*
699  * relookup for DELETE namei operation.
700  *
701  * dvp is unionfs vnode. dvp should be locked.
702  */
703 int
704 unionfs_relookup_for_delete(struct vnode *dvp, struct componentname *cnp,
705     struct thread *td)
706 {
707 	struct vnode *udvp;
708 	struct vnode *vp;
709 	struct componentname cn;
710 	int error;
711 
712 	udvp = UNIONFSVPTOUPPERVP(dvp);
713 	vp = NULLVP;
714 
715 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
716 	    cnp->cn_namelen, DELETE);
717 	if (error)
718 		return (error);
719 
720 	if (vp == NULLVP)
721 		error = ENOENT;
722 	else {
723 		if (udvp == vp)
724 			vrele(vp);
725 		else
726 			vput(vp);
727 	}
728 
729 	return (error);
730 }
731 
732 /*
733  * relookup for RENAME namei operation.
734  *
735  * dvp is unionfs vnode. dvp should be locked.
736  */
737 int
738 unionfs_relookup_for_rename(struct vnode *dvp, struct componentname *cnp,
739     struct thread *td)
740 {
741 	struct vnode *udvp;
742 	struct vnode *vp;
743 	struct componentname cn;
744 	int error;
745 
746 	udvp = UNIONFSVPTOUPPERVP(dvp);
747 	vp = NULLVP;
748 
749 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
750 	    cnp->cn_namelen, RENAME);
751 	if (error)
752 		return (error);
753 
754 	if (vp != NULLVP) {
755 		if (udvp == vp)
756 			vrele(vp);
757 		else
758 			vput(vp);
759 	}
760 
761 	return (error);
762 }
763 
764 /*
765  * Update the unionfs_node.
766  *
767  * uvp is new locked upper vnode. unionfs vnode's lock will be exchanged to the
768  * uvp's lock and lower's lock will be unlocked.
769  */
770 static void
771 unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
772     struct thread *td)
773 {
774 	struct vnode   *vp;
775 	struct vnode   *lvp;
776 	struct vnode   *dvp;
777 	unsigned	count, lockrec;
778 
779 	vp = UNIONFSTOV(unp);
780 	lvp = unp->un_lowervp;
781 	ASSERT_VOP_ELOCKED(lvp, "unionfs_node_update");
782 	dvp = unp->un_dvp;
783 
784 	/*
785 	 * lock update
786 	 */
787 	VI_LOCK(vp);
788 	unp->un_uppervp = uvp;
789 	vp->v_vnlock = uvp->v_vnlock;
790 	VI_UNLOCK(vp);
791 	lockrec = lvp->v_vnlock->lk_recurse;
792 	for (count = 0; count < lockrec; count++)
793 		vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
794 
795 	/*
796 	 * cache update
797 	 */
798 	if (unp->un_path != NULL && dvp != NULLVP && vp->v_type == VDIR) {
799 		static struct unionfs_node_hashhead *hd;
800 
801 		VI_LOCK(dvp);
802 		hd = unionfs_get_hashhead(dvp, unp->un_path);
803 		LIST_REMOVE(unp, un_hash);
804 		LIST_INSERT_HEAD(hd, unp, un_hash);
805 		VI_UNLOCK(dvp);
806 	}
807 }
808 
/*
 * Create a new shadow dir.
 *
 * udvp should be locked on entry and will be locked on return.
 *
 * If no error returned, unp will be updated.
 */
int
unionfs_mkshadowdir(struct unionfs_mount *ump, struct vnode *udvp,
    struct unionfs_node *unp, struct componentname *cnp, struct thread *td)
{
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vattr	va;
	struct vattr	lva;
	struct nameidata nd;
	struct mount   *mp;
	struct ucred   *cred;
	struct ucred   *credbk;
	struct uidinfo *rootinfo;
	int		error;

	if (unp->un_uppervp != NULLVP)
		return (EEXIST);

	lvp = unp->un_lowervp;
	uvp = NULLVP;
	credbk = cnp->cn_cred;

	/* Authority change to root */
	rootinfo = uifind((uid_t)0);
	cred = crdup(cnp->cn_cred);
	/*
	 * The calls to chgproccnt() are needed to compensate for change_ruid()
	 * calling chgproccnt().
	 */
	chgproccnt(cred->cr_ruidinfo, 1, 0);
	change_euid(cred, rootinfo);
	change_ruid(cred, rootinfo);
	change_svuid(cred, (uid_t)0);
	uifree(rootinfo);
	cnp->cn_cred = cred;

	memset(&nd.ni_cnd, 0, sizeof(struct componentname));
	NDPREINIT(&nd);

	/* The shadow dir's attributes are derived from the lower dir's. */
	if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred)))
		goto unionfs_mkshadowdir_abort;

	if ((error = unionfs_relookup(udvp, &uvp, cnp, &nd.ni_cnd, td,
	    cnp->cn_nameptr, cnp->cn_namelen, CREATE)))
		goto unionfs_mkshadowdir_abort;
	if (uvp != NULLVP) {
		/* The name already exists on the upper layer. */
		if (udvp == uvp)
			vrele(uvp);
		else
			vput(uvp);

		error = EEXIST;
		goto unionfs_mkshadowdir_abort;
	}

	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)))
		goto unionfs_mkshadowdir_abort;
	unionfs_create_uppervattr_core(ump, &lva, &va, td);

	error = VOP_MKDIR(udvp, &uvp, &nd.ni_cnd, &va);

	if (!error) {
		/* Switch the unionfs node over to the new upper vnode. */
		unionfs_node_update(unp, uvp, td);

		/*
		 * XXX The bug which cannot set uid/gid was corrected.
		 * Ignore errors.
		 */
		va.va_type = VNON;
		VOP_SETATTR(uvp, &va, nd.ni_cnd.cn_cred);
	}
	vn_finished_write(mp);

unionfs_mkshadowdir_abort:
	/* Restore the caller's credential and drop our root credential. */
	cnp->cn_cred = credbk;
	chgproccnt(cred->cr_ruidinfo, -1, 0);
	crfree(cred);

	return (error);
}
896 
897 /*
898  * Create a new whiteout.
899  *
900  * dvp should be locked on entry and will be locked on return.
901  */
902 int
903 unionfs_mkwhiteout(struct vnode *dvp, struct componentname *cnp,
904     struct thread *td, char *path, int pathlen)
905 {
906 	struct vnode   *wvp;
907 	struct nameidata nd;
908 	struct mount   *mp;
909 	int		error;
910 
911 	wvp = NULLVP;
912 	NDPREINIT(&nd);
913 	if ((error = unionfs_relookup(dvp, &wvp, cnp, &nd.ni_cnd, td, path,
914 	    pathlen, CREATE))) {
915 		return (error);
916 	}
917 	if (wvp != NULLVP) {
918 		if (dvp == wvp)
919 			vrele(wvp);
920 		else
921 			vput(wvp);
922 
923 		return (EEXIST);
924 	}
925 
926 	if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)))
927 		goto unionfs_mkwhiteout_free_out;
928 	error = VOP_WHITEOUT(dvp, &nd.ni_cnd, CREATE);
929 
930 	vn_finished_write(mp);
931 
932 unionfs_mkwhiteout_free_out:
933 	return (error);
934 }
935 
/*
 * Create a new vnode for create a new shadow file.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced and opened vnode.
 *
 * unp is never updated.
 */
static int
unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
    struct unionfs_node *unp, struct vattr *uvap, struct thread *td)
{
	struct unionfs_mount *ump;
	struct vnode   *vp;
	struct vnode   *lvp;
	struct ucred   *cred;
	struct vattr	lva;
	struct nameidata nd;
	int		fmode;
	int		error;

	ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
	vp = NULLVP;
	lvp = unp->un_lowervp;
	cred = td->td_ucred;
	fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
	error = 0;

	/* Derive the new file's attributes from the lower file's. */
	if ((error = VOP_GETATTR(lvp, &lva, cred)) != 0)
		return (error);
	unionfs_create_uppervattr_core(ump, &lva, uvap, td);

	if (unp->un_path == NULL)
		panic("unionfs: un_path is null");

	/* Hand-build a componentname for relookup() of un_path. */
	nd.ni_cnd.cn_namelen = unp->un_pathlen;
	nd.ni_cnd.cn_pnbuf = unp->un_path;
	nd.ni_cnd.cn_nameiop = CREATE;
	nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME |
	    ISLASTCN;
	nd.ni_cnd.cn_lkflags = LK_EXCLUSIVE;
	nd.ni_cnd.cn_cred = cred;
	nd.ni_cnd.cn_nameptr = nd.ni_cnd.cn_pnbuf;
	NDPREINIT(&nd);

	vref(udvp);
	if ((error = relookup(udvp, &vp, &nd.ni_cnd)) != 0)
		goto unionfs_vn_create_on_upper_free_out2;
	vrele(udvp);

	if (vp != NULLVP) {
		/* The name already exists on the upper layer. */
		if (vp == udvp)
			vrele(vp);
		else
			vput(vp);
		error = EEXIST;
		goto unionfs_vn_create_on_upper_free_out1;
	}

	if ((error = VOP_CREATE(udvp, &vp, &nd.ni_cnd, uvap)) != 0)
		goto unionfs_vn_create_on_upper_free_out1;

	if ((error = VOP_OPEN(vp, fmode, cred, td, NULL)) != 0) {
		vput(vp);
		goto unionfs_vn_create_on_upper_free_out1;
	}
	/* Account the open-for-write on the upper vnode. */
	error = VOP_ADD_WRITECOUNT(vp, 1);
	CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
	    __func__, vp, vp->v_writecount);
	if (error == 0) {
		*vpp = vp;
	} else {
		VOP_CLOSE(vp, fmode, cred, td);
	}

unionfs_vn_create_on_upper_free_out1:
	VOP_UNLOCK(udvp);

unionfs_vn_create_on_upper_free_out2:
	KASSERT((nd.ni_cnd.cn_flags & HASBUF) != 0,
	    ("%s: HASBUF cleared", __func__));
	KASSERT((nd.ni_cnd.cn_flags & SAVENAME) != 0,
	    ("%s: SAVENAME cleared", __func__));
	KASSERT(nd.ni_cnd.cn_pnbuf == unp->un_path,
	    ("%s: cn_pnbuf changed", __func__));

	return (error);
}
1024 
1025 /*
1026  * Copy from lvp to uvp.
1027  *
1028  * lvp and uvp should be locked and opened on entry and will be locked and
1029  * opened on return.
1030  */
1031 static int
1032 unionfs_copyfile_core(struct vnode *lvp, struct vnode *uvp,
1033     struct ucred *cred, struct thread *td)
1034 {
1035 	char           *buf;
1036 	struct uio	uio;
1037 	struct iovec	iov;
1038 	off_t		offset;
1039 	int		count;
1040 	int		error;
1041 	int		bufoffset;
1042 
1043 	error = 0;
1044 	memset(&uio, 0, sizeof(uio));
1045 
1046 	uio.uio_td = td;
1047 	uio.uio_segflg = UIO_SYSSPACE;
1048 	uio.uio_offset = 0;
1049 
1050 	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
1051 
1052 	while (error == 0) {
1053 		offset = uio.uio_offset;
1054 
1055 		uio.uio_iov = &iov;
1056 		uio.uio_iovcnt = 1;
1057 		iov.iov_base = buf;
1058 		iov.iov_len = MAXBSIZE;
1059 		uio.uio_resid = iov.iov_len;
1060 		uio.uio_rw = UIO_READ;
1061 
1062 		if ((error = VOP_READ(lvp, &uio, 0, cred)) != 0)
1063 			break;
1064 		if ((count = MAXBSIZE - uio.uio_resid) == 0)
1065 			break;
1066 
1067 		bufoffset = 0;
1068 		while (bufoffset < count) {
1069 			uio.uio_iov = &iov;
1070 			uio.uio_iovcnt = 1;
1071 			iov.iov_base = buf + bufoffset;
1072 			iov.iov_len = count - bufoffset;
1073 			uio.uio_offset = offset + bufoffset;
1074 			uio.uio_resid = iov.iov_len;
1075 			uio.uio_rw = UIO_WRITE;
1076 
1077 			if ((error = VOP_WRITE(uvp, &uio, 0, cred)) != 0)
1078 				break;
1079 
1080 			bufoffset += (count - bufoffset) - uio.uio_resid;
1081 		}
1082 
1083 		uio.uio_offset = offset + bufoffset;
1084 	}
1085 
1086 	free(buf, M_TEMP);
1087 
1088 	return (error);
1089 }
1090 
/*
 * Copy file from lower to upper.
 *
 * If you need copy of the contents, set 1 to docopy. Otherwise, set 0 to
 * docopy.
 *
 * If no error returned, unp will be updated.
 */
int
unionfs_copyfile(struct unionfs_node *unp, int docopy, struct ucred *cred,
    struct thread *td)
{
	struct mount   *mp;
	struct vnode   *udvp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vattr	uva;
	int		error;

	lvp = unp->un_lowervp;
	uvp = NULLVP;

	/* Copy-up requires a writable unionfs mount and upper layer. */
	if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (unp->un_dvp == NULLVP)
		return (EINVAL);
	if (unp->un_uppervp != NULLVP)
		return (EEXIST);
	udvp = VTOUNIONFS(unp->un_dvp)->un_uppervp;
	if (udvp == NULLVP)
		return (EROFS);
	if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	error = VOP_ACCESS(lvp, VREAD, cred, td);
	if (error != 0)
		return (error);

	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	/* Create and open the shadow file on the upper layer. */
	error = unionfs_vn_create_on_upper(&uvp, udvp, unp, &uva, td);
	if (error != 0) {
		vn_finished_write(mp);
		return (error);
	}

	if (docopy != 0) {
		/* Copy the contents from lower to upper. */
		error = VOP_OPEN(lvp, FREAD, cred, td, NULL);
		if (error == 0) {
			error = unionfs_copyfile_core(lvp, uvp, cred, td);
			VOP_CLOSE(lvp, FREAD, cred, td);
		}
	}
	VOP_CLOSE(uvp, FWRITE, cred, td);
	VOP_ADD_WRITECOUNT_CHECKED(uvp, -1);
	CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
	    __func__, uvp, uvp->v_writecount);

	vn_finished_write(mp);

	if (error == 0) {
		/* Reset the attributes. Ignore errors. */
		uva.va_type = VNON;
		VOP_SETATTR(uvp, &uva, cred);
	}

	/*
	 * NOTE(review): the node is switched to the new upper vnode even
	 * when the content copy above failed — confirm this is intended.
	 */
	unionfs_node_update(unp, uvp, td);

	return (error);
}
1161 
1162 /*
1163  * It checks whether vp can rmdir. (check empty)
1164  *
1165  * vp is unionfs vnode.
1166  * vp should be locked.
1167  */
int
unionfs_check_rmdir(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct vnode   *uvp;
	struct vnode   *lvp;
	struct vnode   *tvp;
	struct dirent  *dp;
	struct dirent  *edp;
	struct componentname cn;
	struct iovec	iov;
	struct uio	uio;
	struct vattr	va;
	int		error;
	int		eofflag;
	int		lookuperr;

	/*
	 * The size of buf needs to be larger than DIRBLKSIZ.
	 */
	char		buf[256 * 6];

	ASSERT_VOP_ELOCKED(vp, "unionfs_check_rmdir");

	eofflag = 0;
	uvp = UNIONFSVPTOUPPERVP(vp);
	lvp = UNIONFSVPTOLOWERVP(vp);

	/*
	 * Check opaque: an opaque upper directory hides every lower entry,
	 * so the lower contents cannot make the directory non-empty.
	 */
	if ((error = VOP_GETATTR(uvp, &va, cred)) != 0)
		return (error);
	if (va.va_flags & OPAQUE)
		return (0);

	/* Open vnode: the caller needs read+search permission to scan it. */
#ifdef MAC
	if ((error = mac_vnode_check_open(cred, vp, VEXEC|VREAD)) != 0)
		return (error);
#endif
	if ((error = VOP_ACCESS(vp, VEXEC|VREAD, cred, td)) != 0)
		return (error);
	if ((error = VOP_OPEN(vp, FREAD, cred, td, NULL)) != 0)
		return (error);

	uio.uio_rw = UIO_READ;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = td;
	uio.uio_offset = 0;

#ifdef MAC
	/* A MAC denial here makes the loop below exit immediately. */
	error = mac_vnode_check_readdir(td->td_ucred, lvp);
#endif
	/*
	 * Scan the lower directory.  The directory may be removed only if
	 * every lower entry is covered by an entry or a whiteout in the
	 * upper directory.
	 */
	while (!error && !eofflag) {
		iov.iov_base = buf;
		iov.iov_len = sizeof(buf);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = iov.iov_len;

		error = VOP_READDIR(lvp, &uio, cred, &eofflag, NULL, NULL);
		if (error != 0)
			break;
		/* No progress without EOF means the lower FS misbehaved. */
		if (eofflag == 0 && uio.uio_resid == sizeof(buf)) {
#ifdef DIAGNOSTIC
			panic("bad readdir response from lower FS.");
#endif
			break;
		}

		/* Walk the dirents actually read into buf. */
		edp = (struct dirent*)&buf[sizeof(buf) - uio.uio_resid];
		for (dp = (struct dirent*)buf; !error && dp < edp;
		     dp = (struct dirent*)((caddr_t)dp + dp->d_reclen)) {
			/* Skip whiteouts, empty slots, "." and "..". */
			if (dp->d_type == DT_WHT || dp->d_fileno == 0 ||
			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && !bcmp(dp->d_name, "..", 2)))
				continue;

			cn.cn_namelen = dp->d_namlen;
			cn.cn_pnbuf = NULL;
			cn.cn_nameptr = dp->d_name;
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN;
			cn.cn_lkflags = LK_EXCLUSIVE;
			cn.cn_cred = cred;

			/*
			 * check entry in lower.
			 * Sometimes, readdir function returns
			 * wrong entry.
			 */
			lookuperr = VOP_LOOKUP(lvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);
			else
				continue; /* skip entry */

			/*
			 * check entry
			 * If it has no exist/whiteout entry in upper,
			 * directory is not empty.
			 */
			cn.cn_flags = LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN;
			lookuperr = VOP_LOOKUP(uvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);

			/* ignore exist or whiteout entry */
			if (!lookuperr ||
			    (lookuperr == ENOENT && (cn.cn_flags & ISWHITEOUT)))
				continue;

			/* Uncovered lower entry: the directory is not empty. */
			error = ENOTEMPTY;
		}
	}

	/* close vnode */
	VOP_CLOSE(vp, FREAD, cred, td);

	return (error);
}
1291 
1292 #ifdef DIAGNOSTIC
1293 
1294 struct vnode   *
1295 unionfs_checkuppervp(struct vnode *vp, char *fil, int lno)
1296 {
1297 	struct unionfs_node *unp;
1298 
1299 	unp = VTOUNIONFS(vp);
1300 
1301 #ifdef notyet
1302 	if (vp->v_op != unionfs_vnodeop_p) {
1303 		printf("unionfs_checkuppervp: on non-unionfs-node.\n");
1304 #ifdef KDB
1305 		kdb_enter(KDB_WHY_UNIONFS,
1306 		    "unionfs_checkuppervp: on non-unionfs-node.\n");
1307 #endif
1308 		panic("unionfs_checkuppervp");
1309 	}
1310 #endif
1311 	return (unp->un_uppervp);
1312 }
1313 
1314 struct vnode   *
1315 unionfs_checklowervp(struct vnode *vp, char *fil, int lno)
1316 {
1317 	struct unionfs_node *unp;
1318 
1319 	unp = VTOUNIONFS(vp);
1320 
1321 #ifdef notyet
1322 	if (vp->v_op != unionfs_vnodeop_p) {
1323 		printf("unionfs_checklowervp: on non-unionfs-node.\n");
1324 #ifdef KDB
1325 		kdb_enter(KDB_WHY_UNIONFS,
1326 		    "unionfs_checklowervp: on non-unionfs-node.\n");
1327 #endif
1328 		panic("unionfs_checklowervp");
1329 	}
1330 #endif
1331 	return (unp->un_lowervp);
1332 }
1333 #endif
1334