xref: /freebsd/sys/fs/unionfs/union_subr.c (revision d9a42747950146bf03cda7f6e25d219253f8a57a)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1994 Jan-Simon Pendry
5  * Copyright (c) 1994
6  *	The Regents of the University of California.  All rights reserved.
7  * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
8  * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
9  *
10  * This code is derived from software contributed to Berkeley by
11  * Jan-Simon Pendry.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
38  * $FreeBSD$
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/namei.h>
50 #include <sys/proc.h>
51 #include <sys/vnode.h>
52 #include <sys/dirent.h>
53 #include <sys/fcntl.h>
54 #include <sys/filedesc.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/taskqueue.h>
58 #include <sys/resourcevar.h>
59 
60 #include <machine/atomic.h>
61 
62 #include <security/mac/mac_framework.h>
63 
64 #include <vm/uma.h>
65 
66 #include <fs/unionfs/union.h>
67 
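/*
 * Each unionfs directory vnode keeps a small hash table of its cached
 * child vnodes; NUNIONFSNODECACHE is the table size and must be a power
 * of two.
 */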
68 #define NUNIONFSNODECACHE 16
69 #define UNIONFSHASHMASK (NUNIONFSNODECACHE - 1)
70 
71 static MALLOC_DEFINE(M_UNIONFSHASH, "UNIONFS hash", "UNIONFS hash table");
72 MALLOC_DEFINE(M_UNIONFSNODE, "UNIONFS node", "UNIONFS vnode private part");
73 MALLOC_DEFINE(M_UNIONFSPATH, "UNIONFS path", "UNIONFS path private part");
74 
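/*
 * Deferred-release machinery: unionfs_noderem() queues nodes whose parent
 * directory vnode still needs a vrele() on this list, and the taskqueue
 * thread below performs those releases via unionfs_deferred_rele().
 */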
75 static struct task unionfs_deferred_rele_task;
76 static struct mtx unionfs_deferred_rele_lock;
77 static STAILQ_HEAD(, unionfs_node) unionfs_deferred_rele_list =
78     STAILQ_HEAD_INITIALIZER(unionfs_deferred_rele_list);
79 static TASKQUEUE_DEFINE_THREAD(unionfs_rele);
80 
81 unsigned int unionfs_ndeferred = 0;
82 SYSCTL_UINT(_vfs, OID_AUTO, unionfs_ndeferred, CTLFLAG_RD,
83     &unionfs_ndeferred, 0, "unionfs deferred vnode release");
84 
85 static void unionfs_deferred_rele(void *, int);
86 
87 /*
88  * Initialize
89  */
90 int
91 unionfs_init(struct vfsconf *vfsp)
92 {
93 	UNIONFSDEBUG("unionfs_init\n");	/* printed during system boot */
94 	TASK_INIT(&unionfs_deferred_rele_task, 0, unionfs_deferred_rele, NULL);
95 	mtx_init(&unionfs_deferred_rele_lock, "uniondefr", NULL, MTX_DEF);
96 	return (0);
97 }
98 
99 /*
100  * Uninitialize
101  */
102 int
103 unionfs_uninit(struct vfsconf *vfsp)
104 {
105 	taskqueue_quiesce(taskqueue_unionfs_rele);
106 	taskqueue_free(taskqueue_unionfs_rele);
107 	mtx_destroy(&unionfs_deferred_rele_lock);
108 	return (0);
109 }
110 
111 static void
112 unionfs_deferred_rele(void *arg __unused, int pending __unused)
113 {
114 	STAILQ_HEAD(, unionfs_node) local_rele_list;
115 	struct unionfs_node *unp, *tunp;
116 	unsigned int ndeferred;
117 
118 	ndeferred = 0;
119 	STAILQ_INIT(&local_rele_list);
120 	mtx_lock(&unionfs_deferred_rele_lock);
121 	STAILQ_CONCAT(&local_rele_list, &unionfs_deferred_rele_list);
122 	mtx_unlock(&unionfs_deferred_rele_lock);
123 	STAILQ_FOREACH_SAFE(unp, &local_rele_list, un_rele, tunp) {
124 		++ndeferred;
125 		MPASS(unp->un_dvp != NULL);
126 		vrele(unp->un_dvp);
127 		free(unp, M_UNIONFSNODE);
128 	}
129 
130 	/* We expect this function to be single-threaded, thus no atomic */
131 	unionfs_ndeferred += ndeferred;
132 }
133 
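/*
 * Return the hash chain, within dvp's child-vnode cache, that corresponds
 * to the given upper or lower vnode.
 */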
134 static struct unionfs_node_hashhead *
135 unionfs_get_hashhead(struct vnode *dvp, struct vnode *lookup)
136 {
137 	struct unionfs_node *unp;
138 
139 	unp = VTOUNIONFS(dvp);
140 
141 	return (&(unp->un_hashtbl[vfs_hash_index(lookup) & UNIONFSHASHMASK]));
142 }
143 
144 /*
145  * Attempt to look up a cached unionfs vnode by upper/lower vp
146  * from dvp, with dvp's interlock held.
147  */
148 static struct vnode *
149 unionfs_get_cached_vnode_locked(struct vnode *lookup, struct vnode *dvp)
150 {
151 	struct unionfs_node *unp;
152 	struct unionfs_node_hashhead *hd;
153 	struct vnode *vp;
154 
155 	hd = unionfs_get_hashhead(dvp, lookup);
156 
157 	LIST_FOREACH(unp, hd, un_hash) {
158 		if (unp->un_uppervp == lookup ||
159 		    unp->un_lowervp == lookup) {
160 			vp = UNIONFSTOV(unp);
161 			VI_LOCK_FLAGS(vp, MTX_DUPOK);
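			/*
			 * Cancel any pending deferred inactivation and skip
			 * the vnode if it is being reclaimed or is currently
			 * in VOP_INACTIVE(); otherwise take a reference while
			 * the interlock is still held.
			 */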
162 			vp->v_iflag &= ~VI_OWEINACT;
163 			if (VN_IS_DOOMED(vp) ||
164 			    ((vp->v_iflag & VI_DOINGINACT) != 0)) {
165 				VI_UNLOCK(vp);
166 				vp = NULLVP;
167 			} else {
168 				vrefl(vp);
169 				VI_UNLOCK(vp);
170 			}
171 			return (vp);
172 		}
173 	}
174 
175 	return (NULLVP);
176 }
177 
178 
179 /*
180  * Get the cached vnode.
181  */
182 static struct vnode *
183 unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
184     struct vnode *dvp)
185 {
186 	struct vnode *vp;
187 
188 	vp = NULLVP;
189 	VI_LOCK(dvp);
190 	if (uvp != NULLVP)
191 		vp = unionfs_get_cached_vnode_locked(uvp, dvp);
192 	else if (lvp != NULLVP)
193 		vp = unionfs_get_cached_vnode_locked(lvp, dvp);
194 	VI_UNLOCK(dvp);
195 
196 	return (vp);
197 }
198 
199 /*
200  * Add the new vnode to the cache, or return an existing equivalent one.
201  */
202 static struct vnode *
203 unionfs_ins_cached_vnode(struct unionfs_node *uncp,
204     struct vnode *dvp)
205 {
206 	struct unionfs_node_hashhead *hd;
207 	struct vnode *vp;
208 
209 	ASSERT_VOP_ELOCKED(uncp->un_uppervp, __func__);
210 	ASSERT_VOP_ELOCKED(uncp->un_lowervp, __func__);
211 	KASSERT(uncp->un_uppervp == NULLVP || uncp->un_uppervp->v_type == VDIR,
212 	    ("%s: v_type != VDIR", __func__));
213 	KASSERT(uncp->un_lowervp == NULLVP || uncp->un_lowervp->v_type == VDIR,
214 	    ("%s: v_type != VDIR", __func__));
215 
216 	vp = NULLVP;
217 	VI_LOCK(dvp);
218 	if (uncp->un_uppervp != NULL)
219 		vp = unionfs_get_cached_vnode_locked(uncp->un_uppervp, dvp);
220 	else if (uncp->un_lowervp != NULL)
221 		vp = unionfs_get_cached_vnode_locked(uncp->un_lowervp, dvp);
222 	if (vp == NULLVP) {
223 		hd = unionfs_get_hashhead(dvp, (uncp->un_uppervp != NULLVP ?
224 		    uncp->un_uppervp : uncp->un_lowervp));
225 		LIST_INSERT_HEAD(hd, uncp, un_hash);
226 	}
227 	VI_UNLOCK(dvp);
228 
229 	return (vp);
230 }
231 
232 /*
233  * Remove the vnode from the cache.
234  */
235 static void
236 unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
237 {
238 	KASSERT(unp != NULL, ("%s: null node", __func__));
239 	KASSERT(dvp != NULLVP,
240 	    ("%s: null parent vnode", __func__));
241 
242 	VI_LOCK(dvp);
243 	if (unp->un_hash.le_prev != NULL) {
244 		LIST_REMOVE(unp, un_hash);
245 		unp->un_hash.le_next = NULL;
246 		unp->un_hash.le_prev = NULL;
247 	}
248 	VI_UNLOCK(dvp);
249 }
250 
251 /*
252  * Common cleanup handling for unionfs_nodeget
253  * Upper, lower, and parent directory vnodes are expected to be referenced by
254  * the caller.  Upper and lower vnodes, if non-NULL, are also expected to be
255  * exclusively locked by the caller.
256  * This function will return with the caller's locks and references undone.
257  */
258 static void
259 unionfs_nodeget_cleanup(struct vnode *vp, struct unionfs_node *unp)
260 {
261 
262 	/*
263 	 * Lock and reset the default vnode lock; vgone() expects a locked
264 	 * vnode, and we're going to reset the vnode ops.
265 	 */
266 	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
267 
268 	/*
269 	 * Clear out private data and reset the vnode ops to avoid use of
270 	 * unionfs vnode ops on a partially constructed vnode.
271 	 */
272 	VI_LOCK(vp);
273 	vp->v_data = NULL;
274 	vp->v_vnlock = &vp->v_lock;
275 	vp->v_op = &dead_vnodeops;
276 	VI_UNLOCK(vp);
277 	vgone(vp);
278 	vput(vp);
279 
280 	if (unp->un_dvp != NULLVP)
281 		vrele(unp->un_dvp);
282 	if (unp->un_uppervp != NULLVP)
283 		vput(unp->un_uppervp);
284 	if (unp->un_lowervp != NULLVP)
285 		vput(unp->un_lowervp);
286 	if (unp->un_hashtbl != NULL)
287 		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, UNIONFSHASHMASK);
288 	free(unp->un_path, M_UNIONFSPATH);
289 	free(unp, M_UNIONFSNODE);
290 }
291 
292 /*
293  * Allocate a new unionfs node or return an existing cached one.
294  *
295  * uppervp and lowervp should be unlocked, because locking the new unionfs
296  * vnode also locks uppervp or lowervp.  To avoid deadlock, do not hold
297  * multiple vnode locks at the same time when calling this function.
298  */
299 int
300 unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
301     struct vnode *lowervp, struct vnode *dvp, struct vnode **vpp,
302     struct componentname *cnp)
303 {
304 	char	       *path;
305 	struct unionfs_mount *ump;
306 	struct unionfs_node *unp;
307 	struct vnode   *vp;
308 	u_long		hashmask;
309 	int		error;
310 	int		lkflags;
311 	enum vtype	vt;
312 
313 	error = 0;
314 	ump = MOUNTTOUNIONFSMOUNT(mp);
315 	lkflags = (cnp ? cnp->cn_lkflags : 0);
316 	path = (cnp ? cnp->cn_nameptr : NULL);
317 	*vpp = NULLVP;
318 
319 	if (uppervp == NULLVP && lowervp == NULLVP)
320 		panic("%s: upper and lower are both null", __func__);
321 
322 	vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);
323 
324 	/* If ISLASTCN is not set, skip the path check. */
325 	if (cnp && !(cnp->cn_flags & ISLASTCN))
326 		path = NULL;
327 
328 	/* check the cache */
329 	if (dvp != NULLVP && vt == VDIR) {
330 		vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp);
331 		if (vp != NULLVP) {
332 			*vpp = vp;
333 			goto unionfs_nodeget_out;
334 		}
335 	}
336 
337 	unp = malloc(sizeof(struct unionfs_node),
338 	    M_UNIONFSNODE, M_WAITOK | M_ZERO);
339 
340 	error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
341 	if (error != 0) {
342 		free(unp, M_UNIONFSNODE);
343 		return (error);
344 	}
345 	if (dvp != NULLVP)
346 		vref(dvp);
347 	if (uppervp != NULLVP)
348 		vref(uppervp);
349 	if (lowervp != NULLVP)
350 		vref(lowervp);
351 
352 	if (vt == VDIR) {
353 		unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
354 		    &hashmask);
355 		KASSERT(hashmask == UNIONFSHASHMASK,
356 		    ("unexpected unionfs hash mask 0x%lx", hashmask));
357 	}
358 
359 	unp->un_vnode = vp;
360 	unp->un_uppervp = uppervp;
361 	unp->un_lowervp = lowervp;
362 	unp->un_dvp = dvp;
363 	if (uppervp != NULLVP)
364 		vp->v_vnlock = uppervp->v_vnlock;
365 	else
366 		vp->v_vnlock = lowervp->v_vnlock;
367 
368 	if (path != NULL) {
369 		unp->un_path = malloc(cnp->cn_namelen + 1,
370 		    M_UNIONFSPATH, M_WAITOK | M_ZERO);
371 		bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
372 		unp->un_path[cnp->cn_namelen] = '\0';
373 		unp->un_pathlen = cnp->cn_namelen;
374 	}
375 	vp->v_type = vt;
376 	vp->v_data = unp;
377 
378 	/*
379 	 * TODO: This is an imperfect check, as there's no guarantee that
380 	 * the underlying filesystems will always return vnode pointers
381 	 * for the root inodes that match our cached values.  To reduce
382 	 * the likelihood of failure, for example in the case where either
383 	 * vnode has been forcibly doomed, we check both pointers and set
384 	 * VV_ROOT if either matches.
385 	 */
386 	if (ump->um_uppervp == uppervp || ump->um_lowervp == lowervp)
387 		vp->v_vflag |= VV_ROOT;
388 	KASSERT(dvp != NULL || (vp->v_vflag & VV_ROOT) != 0,
389 	    ("%s: NULL dvp for non-root vp %p", __func__, vp));
390 
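	/*
	 * Lock the lower and upper vnodes (either may be NULL) in a
	 * deadlock-safe order; the new vnode shares its lock with one of
	 * them, and insmntque1() and the checks below expect it locked.
	 */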
391 	vn_lock_pair(lowervp, false, LK_EXCLUSIVE, uppervp, false,
392 	    LK_EXCLUSIVE);
393 	error = insmntque1(vp, mp);
394 	if (error != 0) {
395 		unionfs_nodeget_cleanup(vp, unp);
396 		return (error);
397 	}
398 	if (lowervp != NULL && VN_IS_DOOMED(lowervp)) {
399 		vput(lowervp);
400 		unp->un_lowervp = NULL;
401 	}
402 	if (uppervp != NULL && VN_IS_DOOMED(uppervp)) {
403 		vput(uppervp);
404 		unp->un_uppervp = NULL;
405 	}
406 	if (unp->un_lowervp == NULL && unp->un_uppervp == NULL) {
407 		unionfs_nodeget_cleanup(vp, unp);
408 		return (ENOENT);
409 	}
410 
411 	vn_set_state(vp, VSTATE_CONSTRUCTED);
412 
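	/*
	 * If an equivalent directory vnode was cached concurrently, discard
	 * the vnode just constructed and return the cached one instead.
	 */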
413 	if (dvp != NULLVP && vt == VDIR)
414 		*vpp = unionfs_ins_cached_vnode(unp, dvp);
415 	if (*vpp != NULLVP) {
416 		unionfs_nodeget_cleanup(vp, unp);
417 		vp = *vpp;
418 	} else {
419 		if (uppervp != NULL)
420 			VOP_UNLOCK(uppervp);
421 		if (lowervp != NULL)
422 			VOP_UNLOCK(lowervp);
423 		*vpp = vp;
424 	}
425 
426 unionfs_nodeget_out:
427 	if (lkflags & LK_TYPE_MASK)
428 		vn_lock(vp, lkflags | LK_RETRY);
429 
430 	return (0);
431 }
432 
433 /*
434  * Clean up the unionfs node.
435  */
436 void
437 unionfs_noderem(struct vnode *vp)
438 {
439 	struct unionfs_node *unp, *unp_t1, *unp_t2;
440 	struct unionfs_node_hashhead *hd;
441 	struct unionfs_node_status *unsp, *unsp_tmp;
442 	struct vnode   *lvp;
443 	struct vnode   *uvp;
444 	struct vnode   *dvp;
445 	int		count;
446 	int		writerefs;
447 
448 	/*
449 	 * The root vnode lock may be recursed during unmount, because
450 	 * it may share the same lock as the unionfs mount's covered vnode,
451 	 * which is locked across VFS_UNMOUNT().  This lock will then be
452 	 * recursively taken during the vflush() issued by unionfs_unmount().
453 	 * But we still only need to lock the unionfs lock once, because only
454 	 * one of those lock operations was taken against a unionfs vnode and
455 	 * will be undone against a unionfs vnode.
456 	 */
457 	KASSERT(vp->v_vnlock->lk_recurse == 0 || (vp->v_vflag & VV_ROOT) != 0,
458 	    ("%s: vnode %p locked recursively", __func__, vp));
459 	if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
460 		panic("%s: failed to acquire lock for vnode lock", __func__);
461 
462 	/*
463 	 * Use the interlock to protect the clearing of v_data to
464 	 * prevent faults in unionfs_lock().
465 	 */
466 	VI_LOCK(vp);
467 	unp = VTOUNIONFS(vp);
468 	lvp = unp->un_lowervp;
469 	uvp = unp->un_uppervp;
470 	dvp = unp->un_dvp;
471 	unp->un_lowervp = unp->un_uppervp = NULLVP;
472 	vp->v_vnlock = &(vp->v_lock);
473 	vp->v_data = NULL;
474 	vp->v_object = NULL;
475 	if (unp->un_hashtbl != NULL) {
476 		/*
477 		 * Clear out any cached child vnodes.  This should only
478 		 * be necessary during forced unmount, when the vnode may
479 		 * be reclaimed with a non-zero use count.  Otherwise the
480 		 * reference held by each child should prevent reclamation.
481 		 */
482 		for (count = 0; count <= UNIONFSHASHMASK; count++) {
483 			hd = unp->un_hashtbl + count;
484 			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
485 				LIST_REMOVE(unp_t1, un_hash);
486 				unp_t1->un_hash.le_next = NULL;
487 				unp_t1->un_hash.le_prev = NULL;
488 			}
489 		}
490 	}
491 	VI_UNLOCK(vp);
492 
493 	writerefs = atomic_load_int(&vp->v_writecount);
494 	VNASSERT(writerefs >= 0, vp,
495 	    ("%s: write count %d, unexpected text ref", __func__, writerefs));
496 	/*
497 	 * If we were opened for write, we leased the write reference
498 	 * to the upper vnode.  If this is a reclamation due to a
499 	 * forced unmount, undo the reference now.
500 	 */
501 	if (writerefs > 0) {
502 		VNASSERT(uvp != NULL, vp,
503 		    ("%s: write reference without upper vnode", __func__));
504 		VOP_ADD_WRITECOUNT(uvp, -writerefs);
505 	}
506 	if (lvp != NULLVP)
507 		VOP_UNLOCK(lvp);
508 	if (uvp != NULLVP)
509 		VOP_UNLOCK(uvp);
510 
511 	if (dvp != NULLVP)
512 		unionfs_rem_cached_vnode(unp, dvp);
513 
514 	if (lvp != NULLVP)
515 		vrele(lvp);
516 	if (uvp != NULLVP)
517 		vrele(uvp);
518 	if (unp->un_path != NULL) {
519 		free(unp->un_path, M_UNIONFSPATH);
520 		unp->un_path = NULL;
521 		unp->un_pathlen = 0;
522 	}
523 
524 	if (unp->un_hashtbl != NULL) {
525 		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, UNIONFSHASHMASK);
526 	}
527 
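	/* Free any per-process status records attached to this node. */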
528 	LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
529 		LIST_REMOVE(unsp, uns_list);
530 		free(unsp, M_TEMP);
531 	}
532 	if (dvp != NULLVP) {
533 		mtx_lock(&unionfs_deferred_rele_lock);
534 		STAILQ_INSERT_TAIL(&unionfs_deferred_rele_list, unp, un_rele);
535 		mtx_unlock(&unionfs_deferred_rele_lock);
536 		taskqueue_enqueue(taskqueue_unionfs_rele,
537 		    &unionfs_deferred_rele_task);
538 	} else
539 		free(unp, M_UNIONFSNODE);
540 }
541 
542 /*
543  * Get the unionfs node status object for the vnode corresponding to unp,
544  * for the process that owns td.  Allocate a new status object if one
545  * does not already exist.
546  */
547 void
548 unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
549     struct unionfs_node_status **unspp)
550 {
551 	struct unionfs_node_status *unsp;
552 	pid_t pid;
553 
554 	pid = td->td_proc->p_pid;
555 
556 	KASSERT(NULL != unspp, ("%s: NULL status", __func__));
557 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), __func__);
558 
559 	LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
560 		if (unsp->uns_pid == pid) {
561 			*unspp = unsp;
562 			return;
563 		}
564 	}
565 
566 	/* create a new unionfs node status */
567 	unsp = malloc(sizeof(struct unionfs_node_status),
568 	    M_TEMP, M_WAITOK | M_ZERO);
569 
570 	unsp->uns_pid = pid;
571 	LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);
572 
573 	*unspp = unsp;
574 }
575 
576 /*
577  * Remove the unionfs node status if it is no longer in use.
578  * The vnode must be exclusively locked.
579  */
580 void
581 unionfs_tryrem_node_status(struct unionfs_node *unp,
582     struct unionfs_node_status *unsp)
583 {
584 	KASSERT(NULL != unsp, ("%s: NULL status", __func__));
585 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), __func__);
586 
587 	if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
588 		return;
589 
590 	LIST_REMOVE(unsp, uns_list);
591 	free(unsp, M_TEMP);
592 }
593 
594 /*
595  * Build attributes for a new upper node from the lower node's attributes.
596  */
597 void
598 unionfs_create_uppervattr_core(struct unionfs_mount *ump, struct vattr *lva,
599     struct vattr *uva, struct thread *td)
600 {
601 	VATTR_NULL(uva);
602 	uva->va_type = lva->va_type;
603 	uva->va_atime = lva->va_atime;
604 	uva->va_mtime = lva->va_mtime;
605 	uva->va_ctime = lva->va_ctime;
606 
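	/*
	 * Ownership and permissions of the new upper node depend on the
	 * mount's copy policy: TRANSPARENT copies them from the lower node,
	 * MASQUERADE maps them to the mount's configured user unless the
	 * lower node is already owned by that user, and TRADITIONAL applies
	 * the mount's user/group with a umask-derived mode.
	 */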
607 	switch (ump->um_copymode) {
608 	case UNIONFS_TRANSPARENT:
609 		uva->va_mode = lva->va_mode;
610 		uva->va_uid = lva->va_uid;
611 		uva->va_gid = lva->va_gid;
612 		break;
613 	case UNIONFS_MASQUERADE:
614 		if (ump->um_uid == lva->va_uid) {
615 			uva->va_mode = lva->va_mode & 077077;
616 			uva->va_mode |= (lva->va_type == VDIR ?
617 			    ump->um_udir : ump->um_ufile) & 0700;
618 			uva->va_uid = lva->va_uid;
619 			uva->va_gid = lva->va_gid;
620 		} else {
621 			uva->va_mode = (lva->va_type == VDIR ?
622 			    ump->um_udir : ump->um_ufile);
623 			uva->va_uid = ump->um_uid;
624 			uva->va_gid = ump->um_gid;
625 		}
626 		break;
627 	default:		/* UNIONFS_TRADITIONAL */
628 		uva->va_mode = 0777 & ~td->td_proc->p_pd->pd_cmask;
629 		uva->va_uid = ump->um_uid;
630 		uva->va_gid = ump->um_gid;
631 		break;
632 	}
633 }
634 
635 /*
636  * Create upper node attributes from the lower vnode's current attributes.
637  */
638 int
639 unionfs_create_uppervattr(struct unionfs_mount *ump, struct vnode *lvp,
640     struct vattr *uva, struct ucred *cred, struct thread *td)
641 {
642 	struct vattr	lva;
643 	int		error;
644 
645 	if ((error = VOP_GETATTR(lvp, &lva, cred)))
646 		return (error);
647 
648 	unionfs_create_uppervattr_core(ump, &lva, uva, td);
649 
650 	return (error);
651 }
652 
653 /*
654  * relookup
655  *
656  * dvp should be locked on entry and will be locked on return.
657  *
658  * If an error is returned, *vpp will be invalid, otherwise it will hold a
659  * locked, referenced vnode. If *vpp == dvp then remember that only one
660  * LK_EXCLUSIVE lock is held.
661  */
662 int
663 unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
664     struct componentname *cnp, struct componentname *cn, struct thread *td,
665     char *path, int pathlen, u_long nameiop)
666 {
667 	int error;
668 	bool refstart;
669 
670 	cn->cn_namelen = pathlen;
671 	cn->cn_pnbuf = path;
672 	cn->cn_nameiop = nameiop;
673 	cn->cn_flags = (LOCKPARENT | LOCKLEAF | ISLASTCN);
674 	cn->cn_lkflags = LK_EXCLUSIVE;
675 	cn->cn_cred = cnp->cn_cred;
676 	cn->cn_nameptr = cn->cn_pnbuf;
677 
678 	refstart = false;
679 	if (nameiop == DELETE) {
680 		cn->cn_flags |= (cnp->cn_flags & DOWHITEOUT);
681 	} else if (nameiop == RENAME) {
682 		refstart = true;
683 	} else if (nameiop == CREATE) {
684 		cn->cn_flags |= NOCACHE;
685 	}
686 
687 	vref(dvp);
688 	VOP_UNLOCK(dvp);
689 
690 	if ((error = vfs_relookup(dvp, vpp, cn, refstart))) {
691 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
692 	} else
693 		vrele(dvp);
694 
695 	KASSERT(cn->cn_pnbuf == path, ("%s: cn_pnbuf changed", __func__));
696 
697 	return (error);
698 }
699 
700 /*
701  * Relookup for the CREATE namei operation.
702  *
703  * dvp is the unionfs vnode and should be locked.
704  *
705  * When unionfs_copyfile() has been called (e.g. from unionfs_link()), the
706  * information from the original VOP_LOOKUP is stale, so the name must be
707  * looked up again before the link etc. can be created.
708  */
709 int
710 unionfs_relookup_for_create(struct vnode *dvp, struct componentname *cnp,
711     struct thread *td)
712 {
713 	struct vnode *udvp;
714 	struct vnode *vp;
715 	struct componentname cn;
716 	int error;
717 
718 	udvp = UNIONFSVPTOUPPERVP(dvp);
719 	vp = NULLVP;
720 
721 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
722 	    cnp->cn_namelen, CREATE);
723 	if (error)
724 		return (error);
725 
726 	if (vp != NULLVP) {
727 		if (udvp == vp)
728 			vrele(vp);
729 		else
730 			vput(vp);
731 
732 		error = EEXIST;
733 	}
734 
735 	return (error);
736 }
737 
738 /*
739  * Relookup for the DELETE namei operation.
740  *
741  * dvp is the unionfs vnode and should be locked.
742  */
743 int
744 unionfs_relookup_for_delete(struct vnode *dvp, struct componentname *cnp,
745     struct thread *td)
746 {
747 	struct vnode *udvp;
748 	struct vnode *vp;
749 	struct componentname cn;
750 	int error;
751 
752 	udvp = UNIONFSVPTOUPPERVP(dvp);
753 	vp = NULLVP;
754 
755 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
756 	    cnp->cn_namelen, DELETE);
757 	if (error)
758 		return (error);
759 
760 	if (vp == NULLVP)
761 		error = ENOENT;
762 	else {
763 		if (udvp == vp)
764 			vrele(vp);
765 		else
766 			vput(vp);
767 	}
768 
769 	return (error);
770 }
771 
772 /*
773  * Relookup for the RENAME namei operation.
774  *
775  * dvp is the unionfs vnode and should be locked.
776  */
777 int
778 unionfs_relookup_for_rename(struct vnode *dvp, struct componentname *cnp,
779     struct thread *td)
780 {
781 	struct vnode *udvp;
782 	struct vnode *vp;
783 	struct componentname cn;
784 	int error;
785 
786 	udvp = UNIONFSVPTOUPPERVP(dvp);
787 	vp = NULLVP;
788 
789 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
790 	    cnp->cn_namelen, RENAME);
791 	if (error)
792 		return (error);
793 
794 	if (vp != NULLVP) {
795 		if (udvp == vp)
796 			vrele(vp);
797 		else
798 			vput(vp);
799 	}
800 
801 	return (error);
802 }
803 
804 /*
805  * Update the unionfs_node.
806  *
807  * uvp is the new, locked upper vnode.  The unionfs vnode's lock is switched
808  * to uvp's lock, and the lower vnode's lock is released.
809  */
810 static void
811 unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
812     struct thread *td)
813 {
814 	struct unionfs_node_hashhead *hd;
815 	struct vnode   *vp;
816 	struct vnode   *lvp;
817 	struct vnode   *dvp;
818 	unsigned	count, lockrec;
819 
820 	vp = UNIONFSTOV(unp);
821 	lvp = unp->un_lowervp;
822 	ASSERT_VOP_ELOCKED(lvp, __func__);
823 	ASSERT_VOP_ELOCKED(uvp, __func__);
824 	dvp = unp->un_dvp;
825 
826 	VNASSERT(vp->v_writecount == 0, vp,
827 	    ("%s: non-zero writecount", __func__));
828 	/*
829 	 * Update the upper vnode's lock state to match the lower vnode,
830 	 * and then switch the unionfs vnode's lock to the upper vnode.
831 	 */
832 	lockrec = lvp->v_vnlock->lk_recurse;
833 	for (count = 0; count < lockrec; count++)
834 		vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
835 	VI_LOCK(vp);
836 	unp->un_uppervp = uvp;
837 	vp->v_vnlock = uvp->v_vnlock;
838 	VI_UNLOCK(vp);
839 
840 	/*
841 	 * Re-cache the unionfs vnode against the upper vnode
842 	 */
843 	if (dvp != NULLVP && vp->v_type == VDIR) {
844 		VI_LOCK(dvp);
845 		if (unp->un_hash.le_prev != NULL) {
846 			LIST_REMOVE(unp, un_hash);
847 			hd = unionfs_get_hashhead(dvp, uvp);
848 			LIST_INSERT_HEAD(hd, unp, un_hash);
849 		}
850 		VI_UNLOCK(unp->un_dvp);
851 	}
852 }
853 
854 /*
855  * Create a new shadow dir.
856  *
857  * udvp should be locked on entry and will be locked on return.
858  *
859  * If no error is returned, unp will be updated.
860  */
861 int
862 unionfs_mkshadowdir(struct unionfs_mount *ump, struct vnode *udvp,
863     struct unionfs_node *unp, struct componentname *cnp, struct thread *td)
864 {
865 	struct vnode   *lvp;
866 	struct vnode   *uvp;
867 	struct vattr	va;
868 	struct vattr	lva;
869 	struct nameidata nd;
870 	struct mount   *mp;
871 	struct ucred   *cred;
872 	struct ucred   *credbk;
873 	struct uidinfo *rootinfo;
874 	int		error;
875 
876 	if (unp->un_uppervp != NULLVP)
877 		return (EEXIST);
878 
879 	lvp = unp->un_lowervp;
880 	uvp = NULLVP;
881 	credbk = cnp->cn_cred;
882 
883 	/* Temporarily switch credentials to root. */
884 	rootinfo = uifind((uid_t)0);
885 	cred = crdup(cnp->cn_cred);
886 	/*
887 	 * The calls to chgproccnt() are needed to compensate for change_ruid()
888 	 * calling chgproccnt().
889 	 */
890 	chgproccnt(cred->cr_ruidinfo, 1, 0);
891 	change_euid(cred, rootinfo);
892 	change_ruid(cred, rootinfo);
893 	change_svuid(cred, (uid_t)0);
894 	uifree(rootinfo);
895 	cnp->cn_cred = cred;
896 
897 	memset(&nd.ni_cnd, 0, sizeof(struct componentname));
898 	NDPREINIT(&nd);
899 
900 	if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred)))
901 		goto unionfs_mkshadowdir_abort;
902 
903 	if ((error = unionfs_relookup(udvp, &uvp, cnp, &nd.ni_cnd, td,
904 	    cnp->cn_nameptr, cnp->cn_namelen, CREATE)))
905 		goto unionfs_mkshadowdir_abort;
906 	if (uvp != NULLVP) {
907 		if (udvp == uvp)
908 			vrele(uvp);
909 		else
910 			vput(uvp);
911 
912 		error = EEXIST;
913 		goto unionfs_mkshadowdir_abort;
914 	}
915 
916 	if ((error = vn_start_write(udvp, &mp, V_WAIT | V_PCATCH)))
917 		goto unionfs_mkshadowdir_abort;
918 	unionfs_create_uppervattr_core(ump, &lva, &va, td);
919 
920 	error = VOP_MKDIR(udvp, &uvp, &nd.ni_cnd, &va);
921 
922 	if (!error) {
923 		unionfs_node_update(unp, uvp, td);
924 
925 		/*
926 		 * XXX Set the uid/gid explicitly, since VOP_MKDIR() created
927 		 * the directory with the (root) credential.  Ignore errors.
928 		 */
929 		va.va_type = VNON;
930 		VOP_SETATTR(uvp, &va, nd.ni_cnd.cn_cred);
931 	}
932 	vn_finished_write(mp);
933 
934 unionfs_mkshadowdir_abort:
935 	cnp->cn_cred = credbk;
936 	chgproccnt(cred->cr_ruidinfo, -1, 0);
937 	crfree(cred);
938 
939 	return (error);
940 }
941 
942 /*
943  * Create a new whiteout.
944  *
945  * dvp should be locked on entry and will be locked on return.
946  */
947 int
948 unionfs_mkwhiteout(struct vnode *dvp, struct componentname *cnp,
949     struct thread *td, char *path, int pathlen)
950 {
951 	struct vnode   *wvp;
952 	struct nameidata nd;
953 	struct mount   *mp;
954 	int		error;
955 
956 	wvp = NULLVP;
957 	NDPREINIT(&nd);
958 	if ((error = unionfs_relookup(dvp, &wvp, cnp, &nd.ni_cnd, td, path,
959 	    pathlen, CREATE))) {
960 		return (error);
961 	}
962 	if (wvp != NULLVP) {
963 		if (dvp == wvp)
964 			vrele(wvp);
965 		else
966 			vput(wvp);
967 
968 		return (EEXIST);
969 	}
970 
971 	if ((error = vn_start_write(dvp, &mp, V_WAIT | V_PCATCH)))
972 		goto unionfs_mkwhiteout_free_out;
973 	error = VOP_WHITEOUT(dvp, &nd.ni_cnd, CREATE);
974 
975 	vn_finished_write(mp);
976 
977 unionfs_mkwhiteout_free_out:
978 	return (error);
979 }
980 
981 /*
982  * Create a new vnode for a new shadow file on the upper filesystem.
983  *
984  * If an error is returned, *vpp will be invalid, otherwise it will hold a
985  * locked, referenced and opened vnode.
986  *
987  * unp is never updated.
988  */
989 static int
990 unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
991     struct unionfs_node *unp, struct vattr *uvap, struct thread *td)
992 {
993 	struct unionfs_mount *ump;
994 	struct vnode   *vp;
995 	struct vnode   *lvp;
996 	struct ucred   *cred;
997 	struct vattr	lva;
998 	struct nameidata nd;
999 	int		fmode;
1000 	int		error;
1001 
1002 	ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
1003 	vp = NULLVP;
1004 	lvp = unp->un_lowervp;
1005 	cred = td->td_ucred;
1006 	fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
1007 	error = 0;
1008 
1009 	if ((error = VOP_GETATTR(lvp, &lva, cred)) != 0)
1010 		return (error);
1011 	unionfs_create_uppervattr_core(ump, &lva, uvap, td);
1012 
1013 	if (unp->un_path == NULL)
1014 		panic("%s: NULL un_path", __func__);
1015 
1016 	nd.ni_cnd.cn_namelen = unp->un_pathlen;
1017 	nd.ni_cnd.cn_pnbuf = unp->un_path;
1018 	nd.ni_cnd.cn_nameiop = CREATE;
1019 	nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | ISLASTCN;
1020 	nd.ni_cnd.cn_lkflags = LK_EXCLUSIVE;
1021 	nd.ni_cnd.cn_cred = cred;
1022 	nd.ni_cnd.cn_nameptr = nd.ni_cnd.cn_pnbuf;
1023 	NDPREINIT(&nd);
1024 
1025 	vref(udvp);
1026 	if ((error = vfs_relookup(udvp, &vp, &nd.ni_cnd, false)) != 0)
1027 		goto unionfs_vn_create_on_upper_free_out2;
1028 	vrele(udvp);
1029 
1030 	if (vp != NULLVP) {
1031 		if (vp == udvp)
1032 			vrele(vp);
1033 		else
1034 			vput(vp);
1035 		error = EEXIST;
1036 		goto unionfs_vn_create_on_upper_free_out1;
1037 	}
1038 
1039 	if ((error = VOP_CREATE(udvp, &vp, &nd.ni_cnd, uvap)) != 0)
1040 		goto unionfs_vn_create_on_upper_free_out1;
1041 
1042 	if ((error = VOP_OPEN(vp, fmode, cred, td, NULL)) != 0) {
1043 		vput(vp);
1044 		goto unionfs_vn_create_on_upper_free_out1;
1045 	}
1046 	error = VOP_ADD_WRITECOUNT(vp, 1);
1047 	CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
1048 	    __func__, vp, vp->v_writecount);
1049 	if (error == 0) {
1050 		*vpp = vp;
1051 	} else {
1052 		VOP_CLOSE(vp, fmode, cred, td);
1053 	}
1054 
1055 unionfs_vn_create_on_upper_free_out1:
1056 	VOP_UNLOCK(udvp);
1057 
1058 unionfs_vn_create_on_upper_free_out2:
1059 	KASSERT(nd.ni_cnd.cn_pnbuf == unp->un_path,
1060 	    ("%s: cn_pnbuf changed", __func__));
1061 
1062 	return (error);
1063 }
1064 
1065 /*
1066  * Copy from lvp to uvp.
1067  *
1068  * lvp and uvp should be locked and opened on entry and will be locked and
1069  * opened on return.
1070  */
1071 static int
1072 unionfs_copyfile_core(struct vnode *lvp, struct vnode *uvp,
1073     struct ucred *cred, struct thread *td)
1074 {
1075 	char           *buf;
1076 	struct uio	uio;
1077 	struct iovec	iov;
1078 	off_t		offset;
1079 	int		count;
1080 	int		error;
1081 	int		bufoffset;
1082 
1083 	error = 0;
1084 	memset(&uio, 0, sizeof(uio));
1085 
1086 	uio.uio_td = td;
1087 	uio.uio_segflg = UIO_SYSSPACE;
1088 	uio.uio_offset = 0;
1089 
1090 	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
1091 
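	/*
	 * Read the lower file in MAXBSIZE chunks and write each chunk to the
	 * upper file, retrying partial writes, until EOF or an error.
	 */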
1092 	while (error == 0) {
1093 		offset = uio.uio_offset;
1094 
1095 		uio.uio_iov = &iov;
1096 		uio.uio_iovcnt = 1;
1097 		iov.iov_base = buf;
1098 		iov.iov_len = MAXBSIZE;
1099 		uio.uio_resid = iov.iov_len;
1100 		uio.uio_rw = UIO_READ;
1101 
1102 		if ((error = VOP_READ(lvp, &uio, 0, cred)) != 0)
1103 			break;
1104 		if ((count = MAXBSIZE - uio.uio_resid) == 0)
1105 			break;
1106 
1107 		bufoffset = 0;
1108 		while (bufoffset < count) {
1109 			uio.uio_iov = &iov;
1110 			uio.uio_iovcnt = 1;
1111 			iov.iov_base = buf + bufoffset;
1112 			iov.iov_len = count - bufoffset;
1113 			uio.uio_offset = offset + bufoffset;
1114 			uio.uio_resid = iov.iov_len;
1115 			uio.uio_rw = UIO_WRITE;
1116 
1117 			if ((error = VOP_WRITE(uvp, &uio, 0, cred)) != 0)
1118 				break;
1119 
1120 			bufoffset += (count - bufoffset) - uio.uio_resid;
1121 		}
1122 
1123 		uio.uio_offset = offset + bufoffset;
1124 	}
1125 
1126 	free(buf, M_TEMP);
1127 
1128 	return (error);
1129 }
1130 
1131 /*
1132  * Copy file from lower to upper.
1133  *
1134  * Pass 1 for docopy if the file contents should be copied as well;
1135  * otherwise pass 0 to create the upper file without copying data.
1136  *
1137  * If no error is returned, unp will be updated.
1138  */
1139 int
1140 unionfs_copyfile(struct unionfs_node *unp, int docopy, struct ucred *cred,
1141     struct thread *td)
1142 {
1143 	struct mount   *mp;
1144 	struct vnode   *udvp;
1145 	struct vnode   *lvp;
1146 	struct vnode   *uvp;
1147 	struct vattr	uva;
1148 	int		error;
1149 
1150 	lvp = unp->un_lowervp;
1151 	uvp = NULLVP;
1152 
1153 	if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
1154 		return (EROFS);
1155 	if (unp->un_dvp == NULLVP)
1156 		return (EINVAL);
1157 	if (unp->un_uppervp != NULLVP)
1158 		return (EEXIST);
1159 	udvp = VTOUNIONFS(unp->un_dvp)->un_uppervp;
1160 	if (udvp == NULLVP)
1161 		return (EROFS);
1162 	if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
1163 		return (EROFS);
1164 
1165 	error = VOP_ACCESS(lvp, VREAD, cred, td);
1166 	if (error != 0)
1167 		return (error);
1168 
1169 	if ((error = vn_start_write(udvp, &mp, V_WAIT | V_PCATCH)) != 0)
1170 		return (error);
1171 	error = unionfs_vn_create_on_upper(&uvp, udvp, unp, &uva, td);
1172 	if (error != 0) {
1173 		vn_finished_write(mp);
1174 		return (error);
1175 	}
1176 
1177 	if (docopy != 0) {
1178 		error = VOP_OPEN(lvp, FREAD, cred, td, NULL);
1179 		if (error == 0) {
1180 			error = unionfs_copyfile_core(lvp, uvp, cred, td);
1181 			VOP_CLOSE(lvp, FREAD, cred, td);
1182 		}
1183 	}
1184 	VOP_CLOSE(uvp, FWRITE, cred, td);
1185 	VOP_ADD_WRITECOUNT_CHECKED(uvp, -1);
1186 	CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
1187 	    __func__, uvp, uvp->v_writecount);
1188 
1189 	vn_finished_write(mp);
1190 
1191 	if (error == 0) {
1192 		/* Reset the attributes. Ignore errors. */
1193 		uva.va_type = VNON;
1194 		VOP_SETATTR(uvp, &uva, cred);
1195 	}
1196 
1197 	unionfs_node_update(unp, uvp, td);
1198 
1199 	return (error);
1200 }
1201 
1202 /*
1203  * Check whether the directory vp can be removed, i.e. whether it is
1204  * empty from the union's point of view.
1205  *
1206  * vp is the unionfs vnode and should be locked.
1207  */
1208 int
1209 unionfs_check_rmdir(struct vnode *vp, struct ucred *cred, struct thread *td)
1210 {
1211 	struct vnode   *uvp;
1212 	struct vnode   *lvp;
1213 	struct vnode   *tvp;
1214 	struct dirent  *dp;
1215 	struct dirent  *edp;
1216 	struct componentname cn;
1217 	struct iovec	iov;
1218 	struct uio	uio;
1219 	struct vattr	va;
1220 	int		error;
1221 	int		eofflag;
1222 	int		lookuperr;
1223 
1224 	/*
1225 	 * The size of buf needs to be larger than DIRBLKSIZ.
1226 	 */
1227 	char		buf[256 * 6];
1228 
1229 	ASSERT_VOP_ELOCKED(vp, __func__);
1230 
1231 	eofflag = 0;
1232 	uvp = UNIONFSVPTOUPPERVP(vp);
1233 	lvp = UNIONFSVPTOLOWERVP(vp);
1234 
1235 	/* An opaque upper directory hides all lower entries; nothing to check. */
1236 	if ((error = VOP_GETATTR(uvp, &va, cred)) != 0)
1237 		return (error);
1238 	if (va.va_flags & OPAQUE)
1239 		return (0);
1240 
1241 	/* open vnode */
1242 #ifdef MAC
1243 	if ((error = mac_vnode_check_open(cred, vp, VEXEC|VREAD)) != 0)
1244 		return (error);
1245 #endif
1246 	if ((error = VOP_ACCESS(vp, VEXEC|VREAD, cred, td)) != 0)
1247 		return (error);
1248 	if ((error = VOP_OPEN(vp, FREAD, cred, td, NULL)) != 0)
1249 		return (error);
1250 
1251 	uio.uio_rw = UIO_READ;
1252 	uio.uio_segflg = UIO_SYSSPACE;
1253 	uio.uio_td = td;
1254 	uio.uio_offset = 0;
1255 
1256 #ifdef MAC
1257 	error = mac_vnode_check_readdir(td->td_ucred, lvp);
1258 #endif
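	/*
	 * Scan the lower directory; any entry that is still visible through
	 * the union (neither replaced nor whited out in the upper layer)
	 * makes the directory non-empty.
	 */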
1259 	while (!error && !eofflag) {
1260 		iov.iov_base = buf;
1261 		iov.iov_len = sizeof(buf);
1262 		uio.uio_iov = &iov;
1263 		uio.uio_iovcnt = 1;
1264 		uio.uio_resid = iov.iov_len;
1265 
1266 		error = VOP_READDIR(lvp, &uio, cred, &eofflag, NULL, NULL);
1267 		if (error != 0)
1268 			break;
1269 		KASSERT(eofflag != 0 || uio.uio_resid < sizeof(buf),
1270 		    ("%s: empty read from lower FS", __func__));
1271 
1272 		edp = (struct dirent*)&buf[sizeof(buf) - uio.uio_resid];
1273 		for (dp = (struct dirent*)buf; !error && dp < edp;
1274 		     dp = (struct dirent*)((caddr_t)dp + dp->d_reclen)) {
1275 			if (dp->d_type == DT_WHT || dp->d_fileno == 0 ||
1276 			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
1277 			    (dp->d_namlen == 2 && !bcmp(dp->d_name, "..", 2)))
1278 				continue;
1279 
1280 			cn.cn_namelen = dp->d_namlen;
1281 			cn.cn_pnbuf = NULL;
1282 			cn.cn_nameptr = dp->d_name;
1283 			cn.cn_nameiop = LOOKUP;
1284 			cn.cn_flags = LOCKPARENT | LOCKLEAF | RDONLY | ISLASTCN;
1285 			cn.cn_lkflags = LK_EXCLUSIVE;
1286 			cn.cn_cred = cred;
1287 
1288 			/*
1289 			 * Check that the entry really exists in the lower
1290 			 * layer; VOP_READDIR() sometimes returns stale
1291 			 * entries, which are skipped.
1292 			 */
1293 			lookuperr = VOP_LOOKUP(lvp, &tvp, &cn);
1294 
1295 			if (!lookuperr)
1296 				vput(tvp);
1297 			else
1298 				continue; /* skip entry */
1299 
1300 			/*
1301 			 * Check the entry in the upper layer.  If it is
1302 			 * neither present nor whited out there, the lower
1303 			 * entry is visible and the directory is not empty.
1304 			 */
1305 			cn.cn_flags = LOCKPARENT | LOCKLEAF | RDONLY | ISLASTCN;
1306 			lookuperr = VOP_LOOKUP(uvp, &tvp, &cn);
1307 
1308 			if (!lookuperr)
1309 				vput(tvp);
1310 
1311 			/* Ignore entries that exist or are whited out in the upper. */
1312 			if (!lookuperr ||
1313 			    (lookuperr == ENOENT && (cn.cn_flags & ISWHITEOUT)))
1314 				continue;
1315 
1316 			error = ENOTEMPTY;
1317 		}
1318 	}
1319 
1320 	/* close vnode */
1321 	VOP_CLOSE(vp, FREAD, cred, td);
1322 
1323 	return (error);
1324 }
1325 
1326