xref: /freebsd/sys/fs/unionfs/union_subr.c (revision 09d986419d8834aa4fdb998695a8b7cce7e8e52a)
1 /*-
2  * Copyright (c) 1994 Jan-Simon Pendry
3  * Copyright (c) 1994
4  *	The Regents of the University of California.  All rights reserved.
5  * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
6  * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
7  *
8  * This code is derived from software contributed to Berkeley by
9  * Jan-Simon Pendry.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
36  * $FreeBSD$
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/malloc.h>
45 #include <sys/mount.h>
46 #include <sys/namei.h>
47 #include <sys/proc.h>
48 #include <sys/vnode.h>
49 #include <sys/dirent.h>
50 #include <sys/fcntl.h>
51 #include <sys/filedesc.h>
52 #include <sys/stat.h>
53 #include <sys/resourcevar.h>
54 
55 #include <security/mac/mac_framework.h>
56 
57 #include <vm/uma.h>
58 
59 #include <fs/unionfs/union.h>
60 
61 #define NUNIONFSNODECACHE 16
62 
63 static MALLOC_DEFINE(M_UNIONFSHASH, "UNIONFS hash", "UNIONFS hash table");
64 MALLOC_DEFINE(M_UNIONFSNODE, "UNIONFS node", "UNIONFS vnode private part");
65 MALLOC_DEFINE(M_UNIONFSPATH, "UNIONFS path", "UNIONFS path private part");
66 
67 /*
68  * Initialize
69  */
70 int
71 unionfs_init(struct vfsconf *vfsp)
72 {
73 	UNIONFSDEBUG("unionfs_init\n");	/* printed during system boot */
74 	return (0);
75 }
76 
77 /*
78  * Uninitialize
79  */
80 int
81 unionfs_uninit(struct vfsconf *vfsp)
82 {
83 	return (0);
84 }
85 
86 static struct unionfs_node_hashhead *
87 unionfs_get_hashhead(struct vnode *dvp, char *path)
88 {
89 	int		count;
90 	char		hash;
91 	struct unionfs_node *unp;
92 
93 	hash = 0;
94 	unp = VTOUNIONFS(dvp);
95 	if (path != NULL) {
96 		for (count = 0; path[count]; count++)
97 			hash += path[count];
98 	}
99 
100 	return (&(unp->un_hashtbl[hash & (unp->un_hashmask)]));
101 }
102 
103 /*
104  * Get the cached vnode.
105  */
106 static struct vnode *
107 unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
108 			struct vnode *dvp, char *path)
109 {
110 	struct unionfs_node_hashhead *hd;
111 	struct unionfs_node *unp;
112 	struct vnode   *vp;
113 
114 	KASSERT((uvp == NULLVP || uvp->v_type == VDIR),
115 	    ("unionfs_get_cached_vnode: v_type != VDIR"));
116 	KASSERT((lvp == NULLVP || lvp->v_type == VDIR),
117 	    ("unionfs_get_cached_vnode: v_type != VDIR"));
118 
119 	VI_LOCK(dvp);
120 	hd = unionfs_get_hashhead(dvp, path);
121 	LIST_FOREACH(unp, hd, un_hash) {
122 		if (!strcmp(unp->un_path, path)) {
123 			vp = UNIONFSTOV(unp);
124 			VI_LOCK_FLAGS(vp, MTX_DUPOK);
125 			VI_UNLOCK(dvp);
126 			vp->v_iflag &= ~VI_OWEINACT;
127 			if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
128 				VI_UNLOCK(vp);
129 				vp = NULLVP;
130 			} else
131 				VI_UNLOCK(vp);
132 			return (vp);
133 		}
134 	}
135 	VI_UNLOCK(dvp);
136 
137 	return (NULLVP);
138 }
139 
140 /*
141  * Add the new vnode into cache.
142  */
143 static struct vnode *
144 unionfs_ins_cached_vnode(struct unionfs_node *uncp,
145 			struct vnode *dvp, char *path)
146 {
147 	struct unionfs_node_hashhead *hd;
148 	struct unionfs_node *unp;
149 	struct vnode   *vp;
150 
151 	KASSERT((uncp->un_uppervp==NULLVP || uncp->un_uppervp->v_type==VDIR),
152 	    ("unionfs_ins_cached_vnode: v_type != VDIR"));
153 	KASSERT((uncp->un_lowervp==NULLVP || uncp->un_lowervp->v_type==VDIR),
154 	    ("unionfs_ins_cached_vnode: v_type != VDIR"));
155 
156 	VI_LOCK(dvp);
157 	hd = unionfs_get_hashhead(dvp, path);
158 	LIST_FOREACH(unp, hd, un_hash) {
159 		if (!strcmp(unp->un_path, path)) {
160 			vp = UNIONFSTOV(unp);
161 			VI_LOCK_FLAGS(vp, MTX_DUPOK);
162 			vp->v_iflag &= ~VI_OWEINACT;
163 			if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
164 				LIST_INSERT_HEAD(hd, uncp, un_hash);
165 				VI_UNLOCK(vp);
166 				vp = NULLVP;
167 			} else
168 				VI_UNLOCK(vp);
169 			VI_UNLOCK(dvp);
170 			return (vp);
171 		}
172 	}
173 
174 	LIST_INSERT_HEAD(hd, uncp, un_hash);
175 	VI_UNLOCK(dvp);
176 
177 	return (NULLVP);
178 }
179 
180 /*
181  * Remove the vnode.
182  */
183 static void
184 unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
185 {
186 	KASSERT((unp != NULL), ("unionfs_rem_cached_vnode: null node"));
187 	KASSERT((dvp != NULLVP),
188 	    ("unionfs_rem_cached_vnode: null parent vnode"));
189 	KASSERT((unp->un_hash.le_prev != NULL),
190 	    ("unionfs_rem_cached_vnode: null hash"));
191 
192 	VI_LOCK(dvp);
193 	LIST_REMOVE(unp, un_hash);
194 	unp->un_hash.le_next = NULL;
195 	unp->un_hash.le_prev = NULL;
196 	VI_UNLOCK(dvp);
197 }
198 
199 /*
200  * Make a new or get existing unionfs node.
201  *
202  * uppervp and lowervp should be unlocked. Because if new unionfs vnode is
203  * locked, uppervp or lowervp is locked too. In order to prevent dead lock,
204  * you should not lock plurality simultaneously.
205  */
206 int
207 unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
208 		struct vnode *lowervp, struct vnode *dvp,
209 		struct vnode **vpp, struct componentname *cnp,
210 		struct thread *td)
211 {
212 	struct unionfs_mount *ump;
213 	struct unionfs_node *unp;
214 	struct vnode   *vp;
215 	int		error;
216 	int		lkflags;
217 	enum vtype	vt;
218 	char	       *path;
219 
220 	ump = MOUNTTOUNIONFSMOUNT(mp);
221 	lkflags = (cnp ? cnp->cn_lkflags : 0);
222 	path = (cnp ? cnp->cn_nameptr : NULL);
223 	*vpp = NULLVP;
224 
225 	if (uppervp == NULLVP && lowervp == NULLVP)
226 		panic("unionfs_nodeget: upper and lower is null");
227 
228 	vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);
229 
230 	/* If it has no ISLASTCN flag, path check is skipped. */
231 	if (cnp && !(cnp->cn_flags & ISLASTCN))
232 		path = NULL;
233 
234 	/* check the cache */
235 	if (path != NULL && dvp != NULLVP && vt == VDIR) {
236 		vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp, path);
237 		if (vp != NULLVP) {
238 			vref(vp);
239 			*vpp = vp;
240 			goto unionfs_nodeget_out;
241 		}
242 	}
243 
244 	if ((uppervp == NULLVP || ump->um_uppervp != uppervp) ||
245 	    (lowervp == NULLVP || ump->um_lowervp != lowervp)) {
246 		/* dvp will be NULLVP only in case of root vnode. */
247 		if (dvp == NULLVP)
248 			return (EINVAL);
249 	}
250 	unp = malloc(sizeof(struct unionfs_node),
251 	    M_UNIONFSNODE, M_WAITOK | M_ZERO);
252 
253 	error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
254 	if (error != 0) {
255 		free(unp, M_UNIONFSNODE);
256 		return (error);
257 	}
258 	error = insmntque(vp, mp);	/* XXX: Too early for mpsafe fs */
259 	if (error != 0) {
260 		free(unp, M_UNIONFSNODE);
261 		return (error);
262 	}
263 	if (dvp != NULLVP)
264 		vref(dvp);
265 	if (uppervp != NULLVP)
266 		vref(uppervp);
267 	if (lowervp != NULLVP)
268 		vref(lowervp);
269 
270 	if (vt == VDIR)
271 		unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
272 		    &(unp->un_hashmask));
273 
274 	unp->un_vnode = vp;
275 	unp->un_uppervp = uppervp;
276 	unp->un_lowervp = lowervp;
277 	unp->un_dvp = dvp;
278 	if (uppervp != NULLVP)
279 		vp->v_vnlock = uppervp->v_vnlock;
280 	else
281 		vp->v_vnlock = lowervp->v_vnlock;
282 
283 	if (path != NULL) {
284 		unp->un_path = (char *)
285 		    malloc(cnp->cn_namelen +1, M_UNIONFSPATH, M_WAITOK|M_ZERO);
286 		bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
287 		unp->un_path[cnp->cn_namelen] = '\0';
288 	}
289 	vp->v_type = vt;
290 	vp->v_data = unp;
291 
292 	if ((uppervp != NULLVP && ump->um_uppervp == uppervp) &&
293 	    (lowervp != NULLVP && ump->um_lowervp == lowervp))
294 		vp->v_vflag |= VV_ROOT;
295 
296 	if (path != NULL && dvp != NULLVP && vt == VDIR)
297 		*vpp = unionfs_ins_cached_vnode(unp, dvp, path);
298 	if ((*vpp) != NULLVP) {
299 		if (dvp != NULLVP)
300 			vrele(dvp);
301 		if (uppervp != NULLVP)
302 			vrele(uppervp);
303 		if (lowervp != NULLVP)
304 			vrele(lowervp);
305 
306 		unp->un_uppervp = NULLVP;
307 		unp->un_lowervp = NULLVP;
308 		unp->un_dvp = NULLVP;
309 		vrele(vp);
310 		vp = *vpp;
311 		vref(vp);
312 	} else
313 		*vpp = vp;
314 
315 unionfs_nodeget_out:
316 	if (lkflags & LK_TYPE_MASK)
317 		vn_lock(vp, lkflags | LK_RETRY);
318 
319 	return (0);
320 }
321 
322 /*
323  * Clean up the unionfs node.
324  */
325 void
326 unionfs_noderem(struct vnode *vp, struct thread *td)
327 {
328 	int		count;
329 	struct unionfs_node *unp, *unp_t1, *unp_t2;
330 	struct unionfs_node_hashhead *hd;
331 	struct unionfs_node_status *unsp, *unsp_tmp;
332 	struct vnode   *lvp;
333 	struct vnode   *uvp;
334 	struct vnode   *dvp;
335 
336 	/*
337 	 * Use the interlock to protect the clearing of v_data to
338 	 * prevent faults in unionfs_lock().
339 	 */
340 	VI_LOCK(vp);
341 	unp = VTOUNIONFS(vp);
342 	lvp = unp->un_lowervp;
343 	uvp = unp->un_uppervp;
344 	dvp = unp->un_dvp;
345 	unp->un_lowervp = unp->un_uppervp = NULLVP;
346 	vp->v_vnlock = &(vp->v_lock);
347 	vp->v_data = NULL;
348 	vp->v_object = NULL;
349 	VI_UNLOCK(vp);
350 
351 	if (lvp != NULLVP)
352 		VOP_UNLOCK(lvp, LK_RELEASE);
353 	if (uvp != NULLVP)
354 		VOP_UNLOCK(uvp, LK_RELEASE);
355 
356 	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
357 		unionfs_rem_cached_vnode(unp, dvp);
358 
359 	if (lockmgr(vp->v_vnlock, LK_EXCLUSIVE, VI_MTX(vp)) != 0)
360 		panic("the lock for deletion is unacquirable.");
361 
362 	if (lvp != NULLVP)
363 		vrele(lvp);
364 	if (uvp != NULLVP)
365 		vrele(uvp);
366 	if (dvp != NULLVP) {
367 		vrele(dvp);
368 		unp->un_dvp = NULLVP;
369 	}
370 	if (unp->un_path != NULL) {
371 		free(unp->un_path, M_UNIONFSPATH);
372 		unp->un_path = NULL;
373 	}
374 
375 	if (unp->un_hashtbl != NULL) {
376 		for (count = 0; count <= unp->un_hashmask; count++) {
377 			hd = unp->un_hashtbl + count;
378 			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
379 				LIST_REMOVE(unp_t1, un_hash);
380 				unp_t1->un_hash.le_next = NULL;
381 				unp_t1->un_hash.le_prev = NULL;
382 			}
383 		}
384 		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
385 	}
386 
387 	LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
388 		LIST_REMOVE(unsp, uns_list);
389 		free(unsp, M_TEMP);
390 	}
391 	free(unp, M_UNIONFSNODE);
392 }
393 
394 /*
395  * Get the unionfs node status.
396  * You need exclusive lock this vnode.
397  */
398 void
399 unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
400 			struct unionfs_node_status **unspp)
401 {
402 	struct unionfs_node_status *unsp;
403 	pid_t pid = td->td_proc->p_pid;
404 
405 	KASSERT(NULL != unspp, ("null pointer"));
406 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_get_node_status");
407 
408 	LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
409 		if (unsp->uns_pid == pid) {
410 			*unspp = unsp;
411 			return;
412 		}
413 	}
414 
415 	/* create a new unionfs node status */
416 	unsp = malloc(sizeof(struct unionfs_node_status),
417 	    M_TEMP, M_WAITOK | M_ZERO);
418 
419 	unsp->uns_pid = pid;
420 	LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);
421 
422 	*unspp = unsp;
423 }
424 
425 /*
426  * Remove the unionfs node status, if you can.
427  * You need exclusive lock this vnode.
428  */
429 void
430 unionfs_tryrem_node_status(struct unionfs_node *unp,
431 			   struct unionfs_node_status *unsp)
432 {
433 	KASSERT(NULL != unsp, ("null pointer"));
434 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_get_node_status");
435 
436 	if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
437 		return;
438 
439 	LIST_REMOVE(unsp, uns_list);
440 	free(unsp, M_TEMP);
441 }
442 
443 /*
444  * Create upper node attr.
445  */
446 void
447 unionfs_create_uppervattr_core(struct unionfs_mount *ump,
448 			       struct vattr *lva,
449 			       struct vattr *uva,
450 			       struct thread *td)
451 {
452 	VATTR_NULL(uva);
453 	uva->va_type = lva->va_type;
454 	uva->va_atime = lva->va_atime;
455 	uva->va_mtime = lva->va_mtime;
456 	uva->va_ctime = lva->va_ctime;
457 
458 	switch (ump->um_copymode) {
459 	case UNIONFS_TRANSPARENT:
460 		uva->va_mode = lva->va_mode;
461 		uva->va_uid = lva->va_uid;
462 		uva->va_gid = lva->va_gid;
463 		break;
464 	case UNIONFS_MASQUERADE:
465 		if (ump->um_uid == lva->va_uid) {
466 			uva->va_mode = lva->va_mode & 077077;
467 			uva->va_mode |= (lva->va_type == VDIR ? ump->um_udir : ump->um_ufile) & 0700;
468 			uva->va_uid = lva->va_uid;
469 			uva->va_gid = lva->va_gid;
470 		} else {
471 			uva->va_mode = (lva->va_type == VDIR ? ump->um_udir : ump->um_ufile);
472 			uva->va_uid = ump->um_uid;
473 			uva->va_gid = ump->um_gid;
474 		}
475 		break;
476 	default:		/* UNIONFS_TRADITIONAL */
477 		uva->va_mode = 0777 & ~td->td_proc->p_fd->fd_cmask;
478 		uva->va_uid = ump->um_uid;
479 		uva->va_gid = ump->um_gid;
480 		break;
481 	}
482 }
483 
484 /*
485  * Create upper node attr.
486  */
487 int
488 unionfs_create_uppervattr(struct unionfs_mount *ump,
489 			  struct vnode *lvp,
490 			  struct vattr *uva,
491 			  struct ucred *cred,
492 			  struct thread *td)
493 {
494 	int		error;
495 	struct vattr	lva;
496 
497 	if ((error = VOP_GETATTR(lvp, &lva, cred)))
498 		return (error);
499 
500 	unionfs_create_uppervattr_core(ump, &lva, uva, td);
501 
502 	return (error);
503 }
504 
505 /*
506  * relookup
507  *
508  * dvp should be locked on entry and will be locked on return.
509  *
510  * If an error is returned, *vpp will be invalid, otherwise it will hold a
511  * locked, referenced vnode. If *vpp == dvp then remember that only one
512  * LK_EXCLUSIVE lock is held.
513  */
514 int
515 unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
516 		 struct componentname *cnp, struct componentname *cn,
517 		 struct thread *td, char *path, int pathlen, u_long nameiop)
518 {
519 	int	error;
520 
521 	cn->cn_namelen = pathlen;
522 	cn->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
523 	bcopy(path, cn->cn_pnbuf, pathlen);
524 	cn->cn_pnbuf[pathlen] = '\0';
525 
526 	cn->cn_nameiop = nameiop;
527 	cn->cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
528 	cn->cn_lkflags = LK_EXCLUSIVE;
529 	cn->cn_thread = td;
530 	cn->cn_cred = cnp->cn_cred;
531 
532 	cn->cn_nameptr = cn->cn_pnbuf;
533 	cn->cn_consume = cnp->cn_consume;
534 
535 	if (nameiop == DELETE)
536 		cn->cn_flags |= (cnp->cn_flags & (DOWHITEOUT | SAVESTART));
537 	else if (RENAME == nameiop)
538 		cn->cn_flags |= (cnp->cn_flags & SAVESTART);
539 	else if (nameiop == CREATE)
540 		cn->cn_flags |= NOCACHE;
541 
542 	vref(dvp);
543 	VOP_UNLOCK(dvp, LK_RELEASE);
544 
545 	if ((error = relookup(dvp, vpp, cn))) {
546 		uma_zfree(namei_zone, cn->cn_pnbuf);
547 		cn->cn_flags &= ~HASBUF;
548 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
549 	} else
550 		vrele(dvp);
551 
552 	return (error);
553 }
554 
555 /*
556  * relookup for CREATE namei operation.
557  *
558  * dvp is unionfs vnode. dvp should be locked.
559  *
560  * If it called 'unionfs_copyfile' function by unionfs_link etc,
561  * VOP_LOOKUP information is broken.
562  * So it need relookup in order to create link etc.
563  */
564 int
565 unionfs_relookup_for_create(struct vnode *dvp, struct componentname *cnp,
566 			    struct thread *td)
567 {
568 	int	error;
569 	struct vnode *udvp;
570 	struct vnode *vp;
571 	struct componentname cn;
572 
573 	udvp = UNIONFSVPTOUPPERVP(dvp);
574 	vp = NULLVP;
575 
576 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
577 	    strlen(cnp->cn_nameptr), CREATE);
578 	if (error)
579 		return (error);
580 
581 	if (vp != NULLVP) {
582 		if (udvp == vp)
583 			vrele(vp);
584 		else
585 			vput(vp);
586 
587 		error = EEXIST;
588 	}
589 
590 	if (cn.cn_flags & HASBUF) {
591 		uma_zfree(namei_zone, cn.cn_pnbuf);
592 		cn.cn_flags &= ~HASBUF;
593 	}
594 
595 	if (!error) {
596 		cn.cn_flags |= (cnp->cn_flags & HASBUF);
597 		cnp->cn_flags = cn.cn_flags;
598 	}
599 
600 	return (error);
601 }
602 
603 /*
604  * relookup for DELETE namei operation.
605  *
606  * dvp is unionfs vnode. dvp should be locked.
607  */
608 int
609 unionfs_relookup_for_delete(struct vnode *dvp, struct componentname *cnp,
610 			    struct thread *td)
611 {
612 	int	error;
613 	struct vnode *udvp;
614 	struct vnode *vp;
615 	struct componentname cn;
616 
617 	udvp = UNIONFSVPTOUPPERVP(dvp);
618 	vp = NULLVP;
619 
620 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
621 	    strlen(cnp->cn_nameptr), DELETE);
622 	if (error)
623 		return (error);
624 
625 	if (vp == NULLVP)
626 		error = ENOENT;
627 	else {
628 		if (udvp == vp)
629 			vrele(vp);
630 		else
631 			vput(vp);
632 	}
633 
634 	if (cn.cn_flags & HASBUF) {
635 		uma_zfree(namei_zone, cn.cn_pnbuf);
636 		cn.cn_flags &= ~HASBUF;
637 	}
638 
639 	if (!error) {
640 		cn.cn_flags |= (cnp->cn_flags & HASBUF);
641 		cnp->cn_flags = cn.cn_flags;
642 	}
643 
644 	return (error);
645 }
646 
647 /*
648  * relookup for RENAME namei operation.
649  *
650  * dvp is unionfs vnode. dvp should be locked.
651  */
652 int
653 unionfs_relookup_for_rename(struct vnode *dvp, struct componentname *cnp,
654 			    struct thread *td)
655 {
656 	int error;
657 	struct vnode *udvp;
658 	struct vnode *vp;
659 	struct componentname cn;
660 
661 	udvp = UNIONFSVPTOUPPERVP(dvp);
662 	vp = NULLVP;
663 
664 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
665 	    strlen(cnp->cn_nameptr), RENAME);
666 	if (error)
667 		return (error);
668 
669 	if (vp != NULLVP) {
670 		if (udvp == vp)
671 			vrele(vp);
672 		else
673 			vput(vp);
674 	}
675 
676 	if (cn.cn_flags & HASBUF) {
677 		uma_zfree(namei_zone, cn.cn_pnbuf);
678 		cn.cn_flags &= ~HASBUF;
679 	}
680 
681 	if (!error) {
682 		cn.cn_flags |= (cnp->cn_flags & HASBUF);
683 		cnp->cn_flags = cn.cn_flags;
684 	}
685 
686 	return (error);
687 
688 }
689 
690 /*
691  * Update the unionfs_node.
692  *
693  * uvp is new locked upper vnode. unionfs vnode's lock will be exchanged to the
694  * uvp's lock and lower's lock will be unlocked.
695  */
696 static void
697 unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
698 		    struct thread *td)
699 {
700 	unsigned	count, lockrec;
701 	struct vnode   *vp;
702 	struct vnode   *lvp;
703 	struct vnode   *dvp;
704 
705 	vp = UNIONFSTOV(unp);
706 	lvp = unp->un_lowervp;
707 	ASSERT_VOP_ELOCKED(lvp, "unionfs_node_update");
708 	dvp = unp->un_dvp;
709 
710 	/*
711 	 * lock update
712 	 */
713 	VI_LOCK(vp);
714 	unp->un_uppervp = uvp;
715 	vp->v_vnlock = uvp->v_vnlock;
716 	VI_UNLOCK(vp);
717 	lockrec = lvp->v_vnlock->lk_recurse;
718 	for (count = 0; count < lockrec; count++)
719 		vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
720 
721 	/*
722 	 * cache update
723 	 */
724 	if (unp->un_path != NULL && dvp != NULLVP && vp->v_type == VDIR) {
725 		static struct unionfs_node_hashhead *hd;
726 
727 		VI_LOCK(dvp);
728 		hd = unionfs_get_hashhead(dvp, unp->un_path);
729 		LIST_REMOVE(unp, un_hash);
730 		LIST_INSERT_HEAD(hd, unp, un_hash);
731 		VI_UNLOCK(dvp);
732 	}
733 }
734 
735 /*
736  * Create a new shadow dir.
737  *
738  * udvp should be locked on entry and will be locked on return.
739  *
740  * If no error returned, unp will be updated.
741  */
742 int
743 unionfs_mkshadowdir(struct unionfs_mount *ump, struct vnode *udvp,
744 		    struct unionfs_node *unp, struct componentname *cnp,
745 		    struct thread *td)
746 {
747 	int		error;
748 	struct vnode   *lvp;
749 	struct vnode   *uvp;
750 	struct vattr	va;
751 	struct vattr	lva;
752 	struct componentname cn;
753 	struct mount   *mp;
754 	struct ucred   *cred;
755 	struct ucred   *credbk;
756 	struct uidinfo *rootinfo;
757 
758 	if (unp->un_uppervp != NULLVP)
759 		return (EEXIST);
760 
761 	lvp = unp->un_lowervp;
762 	uvp = NULLVP;
763 	credbk = cnp->cn_cred;
764 
765 	/* Authority change to root */
766 	rootinfo = uifind((uid_t)0);
767 	cred = crdup(cnp->cn_cred);
768 	/*
769 	 * The calls to chgproccnt() are needed to compensate for change_ruid()
770 	 * calling chgproccnt().
771 	 */
772 	chgproccnt(cred->cr_ruidinfo, 1, 0);
773 	change_euid(cred, rootinfo);
774 	change_ruid(cred, rootinfo);
775 	change_svuid(cred, (uid_t)0);
776 	uifree(rootinfo);
777 	cnp->cn_cred = cred;
778 
779 	memset(&cn, 0, sizeof(cn));
780 
781 	if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred)))
782 		goto unionfs_mkshadowdir_abort;
783 
784 	if ((error = unionfs_relookup(udvp, &uvp, cnp, &cn, td, cnp->cn_nameptr, cnp->cn_namelen, CREATE)))
785 		goto unionfs_mkshadowdir_abort;
786 	if (uvp != NULLVP) {
787 		if (udvp == uvp)
788 			vrele(uvp);
789 		else
790 			vput(uvp);
791 
792 		error = EEXIST;
793 		goto unionfs_mkshadowdir_free_out;
794 	}
795 
796 	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)))
797 		goto unionfs_mkshadowdir_free_out;
798 	unionfs_create_uppervattr_core(ump, &lva, &va, td);
799 
800 	error = VOP_MKDIR(udvp, &uvp, &cn, &va);
801 
802 	if (!error) {
803 		unionfs_node_update(unp, uvp, td);
804 
805 		/*
806 		 * XXX The bug which cannot set uid/gid was corrected.
807 		 * Ignore errors.
808 		 */
809 		va.va_type = VNON;
810 		VOP_SETATTR(uvp, &va, cn.cn_cred);
811 	}
812 	vn_finished_write(mp);
813 
814 unionfs_mkshadowdir_free_out:
815 	if (cn.cn_flags & HASBUF) {
816 		uma_zfree(namei_zone, cn.cn_pnbuf);
817 		cn.cn_flags &= ~HASBUF;
818 	}
819 
820 unionfs_mkshadowdir_abort:
821 	cnp->cn_cred = credbk;
822 	chgproccnt(cred->cr_ruidinfo, -1, 0);
823 	crfree(cred);
824 
825 	return (error);
826 }
827 
828 /*
829  * Create a new whiteout.
830  *
831  * dvp should be locked on entry and will be locked on return.
832  */
833 int
834 unionfs_mkwhiteout(struct vnode *dvp, struct componentname *cnp,
835 		   struct thread *td, char *path)
836 {
837 	int		error;
838 	struct vnode   *wvp;
839 	struct componentname cn;
840 	struct mount   *mp;
841 
842 	if (path == NULL)
843 		path = cnp->cn_nameptr;
844 
845 	wvp = NULLVP;
846 	if ((error = unionfs_relookup(dvp, &wvp, cnp, &cn, td, path, strlen(path), CREATE)))
847 		return (error);
848 	if (wvp != NULLVP) {
849 		if (cn.cn_flags & HASBUF) {
850 			uma_zfree(namei_zone, cn.cn_pnbuf);
851 			cn.cn_flags &= ~HASBUF;
852 		}
853 		if (dvp == wvp)
854 			vrele(wvp);
855 		else
856 			vput(wvp);
857 
858 		return (EEXIST);
859 	}
860 
861 	if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)))
862 		goto unionfs_mkwhiteout_free_out;
863 	error = VOP_WHITEOUT(dvp, &cn, CREATE);
864 
865 	vn_finished_write(mp);
866 
867 unionfs_mkwhiteout_free_out:
868 	if (cn.cn_flags & HASBUF) {
869 		uma_zfree(namei_zone, cn.cn_pnbuf);
870 		cn.cn_flags &= ~HASBUF;
871 	}
872 
873 	return (error);
874 }
875 
876 /*
877  * Create a new vnode for create a new shadow file.
878  *
879  * If an error is returned, *vpp will be invalid, otherwise it will hold a
880  * locked, referenced and opened vnode.
881  *
882  * unp is never updated.
883  */
884 static int
885 unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
886 			   struct unionfs_node *unp, struct vattr *uvap,
887 			   struct thread *td)
888 {
889 	struct unionfs_mount *ump;
890 	struct vnode   *vp;
891 	struct vnode   *lvp;
892 	struct ucred   *cred;
893 	struct vattr	lva;
894 	int		fmode;
895 	int		error;
896 	struct componentname cn;
897 
898 	ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
899 	vp = NULLVP;
900 	lvp = unp->un_lowervp;
901 	cred = td->td_ucred;
902 	fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
903 	error = 0;
904 
905 	if ((error = VOP_GETATTR(lvp, &lva, cred)) != 0)
906 		return (error);
907 	unionfs_create_uppervattr_core(ump, &lva, uvap, td);
908 
909 	if (unp->un_path == NULL)
910 		panic("unionfs: un_path is null");
911 
912 	cn.cn_namelen = strlen(unp->un_path);
913 	cn.cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
914 	bcopy(unp->un_path, cn.cn_pnbuf, cn.cn_namelen + 1);
915 	cn.cn_nameiop = CREATE;
916 	cn.cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
917 	cn.cn_lkflags = LK_EXCLUSIVE;
918 	cn.cn_thread = td;
919 	cn.cn_cred = cred;
920 	cn.cn_nameptr = cn.cn_pnbuf;
921 	cn.cn_consume = 0;
922 
923 	vref(udvp);
924 	if ((error = relookup(udvp, &vp, &cn)) != 0)
925 		goto unionfs_vn_create_on_upper_free_out2;
926 	vrele(udvp);
927 
928 	if (vp != NULLVP) {
929 		if (vp == udvp)
930 			vrele(vp);
931 		else
932 			vput(vp);
933 		error = EEXIST;
934 		goto unionfs_vn_create_on_upper_free_out1;
935 	}
936 
937 	if ((error = VOP_CREATE(udvp, &vp, &cn, uvap)) != 0)
938 		goto unionfs_vn_create_on_upper_free_out1;
939 
940 	if ((error = VOP_OPEN(vp, fmode, cred, td, NULL)) != 0) {
941 		vput(vp);
942 		goto unionfs_vn_create_on_upper_free_out1;
943 	}
944 	VOP_ADD_WRITECOUNT(vp, 1);
945 	CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",  __func__, vp,
946 	    vp->v_writecount);
947 	*vpp = vp;
948 
949 unionfs_vn_create_on_upper_free_out1:
950 	VOP_UNLOCK(udvp, LK_RELEASE);
951 
952 unionfs_vn_create_on_upper_free_out2:
953 	if (cn.cn_flags & HASBUF) {
954 		uma_zfree(namei_zone, cn.cn_pnbuf);
955 		cn.cn_flags &= ~HASBUF;
956 	}
957 
958 	return (error);
959 }
960 
961 /*
962  * Copy from lvp to uvp.
963  *
964  * lvp and uvp should be locked and opened on entry and will be locked and
965  * opened on return.
966  */
967 static int
968 unionfs_copyfile_core(struct vnode *lvp, struct vnode *uvp,
969 		      struct ucred *cred, struct thread *td)
970 {
971 	int		error;
972 	off_t		offset;
973 	int		count;
974 	int		bufoffset;
975 	char           *buf;
976 	struct uio	uio;
977 	struct iovec	iov;
978 
979 	error = 0;
980 	memset(&uio, 0, sizeof(uio));
981 
982 	uio.uio_td = td;
983 	uio.uio_segflg = UIO_SYSSPACE;
984 	uio.uio_offset = 0;
985 
986 	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
987 
988 	while (error == 0) {
989 		offset = uio.uio_offset;
990 
991 		uio.uio_iov = &iov;
992 		uio.uio_iovcnt = 1;
993 		iov.iov_base = buf;
994 		iov.iov_len = MAXBSIZE;
995 		uio.uio_resid = iov.iov_len;
996 		uio.uio_rw = UIO_READ;
997 
998 		if ((error = VOP_READ(lvp, &uio, 0, cred)) != 0)
999 			break;
1000 		if ((count = MAXBSIZE - uio.uio_resid) == 0)
1001 			break;
1002 
1003 		bufoffset = 0;
1004 		while (bufoffset < count) {
1005 			uio.uio_iov = &iov;
1006 			uio.uio_iovcnt = 1;
1007 			iov.iov_base = buf + bufoffset;
1008 			iov.iov_len = count - bufoffset;
1009 			uio.uio_offset = offset + bufoffset;
1010 			uio.uio_resid = iov.iov_len;
1011 			uio.uio_rw = UIO_WRITE;
1012 
1013 			if ((error = VOP_WRITE(uvp, &uio, 0, cred)) != 0)
1014 				break;
1015 
1016 			bufoffset += (count - bufoffset) - uio.uio_resid;
1017 		}
1018 
1019 		uio.uio_offset = offset + bufoffset;
1020 	}
1021 
1022 	free(buf, M_TEMP);
1023 
1024 	return (error);
1025 }
1026 
1027 /*
1028  * Copy file from lower to upper.
1029  *
1030  * If you need copy of the contents, set 1 to docopy. Otherwise, set 0 to
1031  * docopy.
1032  *
1033  * If no error returned, unp will be updated.
1034  */
1035 int
1036 unionfs_copyfile(struct unionfs_node *unp, int docopy, struct ucred *cred,
1037 		 struct thread *td)
1038 {
1039 	int		error;
1040 	struct mount   *mp;
1041 	struct vnode   *udvp;
1042 	struct vnode   *lvp;
1043 	struct vnode   *uvp;
1044 	struct vattr	uva;
1045 
1046 	lvp = unp->un_lowervp;
1047 	uvp = NULLVP;
1048 
1049 	if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
1050 		return (EROFS);
1051 	if (unp->un_dvp == NULLVP)
1052 		return (EINVAL);
1053 	if (unp->un_uppervp != NULLVP)
1054 		return (EEXIST);
1055 	udvp = VTOUNIONFS(unp->un_dvp)->un_uppervp;
1056 	if (udvp == NULLVP)
1057 		return (EROFS);
1058 	if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
1059 		return (EROFS);
1060 
1061 	error = VOP_ACCESS(lvp, VREAD, cred, td);
1062 	if (error != 0)
1063 		return (error);
1064 
1065 	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)) != 0)
1066 		return (error);
1067 	error = unionfs_vn_create_on_upper(&uvp, udvp, unp, &uva, td);
1068 	if (error != 0) {
1069 		vn_finished_write(mp);
1070 		return (error);
1071 	}
1072 
1073 	if (docopy != 0) {
1074 		error = VOP_OPEN(lvp, FREAD, cred, td, NULL);
1075 		if (error == 0) {
1076 			error = unionfs_copyfile_core(lvp, uvp, cred, td);
1077 			VOP_CLOSE(lvp, FREAD, cred, td);
1078 		}
1079 	}
1080 	VOP_CLOSE(uvp, FWRITE, cred, td);
1081 	VOP_ADD_WRITECOUNT(uvp, -1);
1082 	CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d", __func__, uvp,
1083 	    uvp->v_writecount);
1084 
1085 	vn_finished_write(mp);
1086 
1087 	if (error == 0) {
1088 		/* Reset the attributes. Ignore errors. */
1089 		uva.va_type = VNON;
1090 		VOP_SETATTR(uvp, &uva, cred);
1091 	}
1092 
1093 	unionfs_node_update(unp, uvp, td);
1094 
1095 	return (error);
1096 }
1097 
1098 /*
1099  * It checks whether vp can rmdir. (check empty)
1100  *
1101  * vp is unionfs vnode.
1102  * vp should be locked.
1103  */
1104 int
1105 unionfs_check_rmdir(struct vnode *vp, struct ucred *cred, struct thread *td)
1106 {
1107 	int		error;
1108 	int		eofflag;
1109 	int		lookuperr;
1110 	struct vnode   *uvp;
1111 	struct vnode   *lvp;
1112 	struct vnode   *tvp;
1113 	struct vattr	va;
1114 	struct componentname cn;
1115 	/*
1116 	 * The size of buf needs to be larger than DIRBLKSIZ.
1117 	 */
1118 	char		buf[256 * 6];
1119 	struct dirent  *dp;
1120 	struct dirent  *edp;
1121 	struct uio	uio;
1122 	struct iovec	iov;
1123 
1124 	ASSERT_VOP_ELOCKED(vp, "unionfs_check_rmdir");
1125 
1126 	eofflag = 0;
1127 	uvp = UNIONFSVPTOUPPERVP(vp);
1128 	lvp = UNIONFSVPTOLOWERVP(vp);
1129 
1130 	/* check opaque */
1131 	if ((error = VOP_GETATTR(uvp, &va, cred)) != 0)
1132 		return (error);
1133 	if (va.va_flags & OPAQUE)
1134 		return (0);
1135 
1136 	/* open vnode */
1137 #ifdef MAC
1138 	if ((error = mac_vnode_check_open(cred, vp, VEXEC|VREAD)) != 0)
1139 		return (error);
1140 #endif
1141 	if ((error = VOP_ACCESS(vp, VEXEC|VREAD, cred, td)) != 0)
1142 		return (error);
1143 	if ((error = VOP_OPEN(vp, FREAD, cred, td, NULL)) != 0)
1144 		return (error);
1145 
1146 	uio.uio_rw = UIO_READ;
1147 	uio.uio_segflg = UIO_SYSSPACE;
1148 	uio.uio_td = td;
1149 	uio.uio_offset = 0;
1150 
1151 #ifdef MAC
1152 	error = mac_vnode_check_readdir(td->td_ucred, lvp);
1153 #endif
1154 	while (!error && !eofflag) {
1155 		iov.iov_base = buf;
1156 		iov.iov_len = sizeof(buf);
1157 		uio.uio_iov = &iov;
1158 		uio.uio_iovcnt = 1;
1159 		uio.uio_resid = iov.iov_len;
1160 
1161 		error = VOP_READDIR(lvp, &uio, cred, &eofflag, NULL, NULL);
1162 		if (error != 0)
1163 			break;
1164 		if (eofflag == 0 && uio.uio_resid == sizeof(buf)) {
1165 #ifdef DIAGNOSTIC
1166 			panic("bad readdir response from lower FS.");
1167 #endif
1168 			break;
1169 		}
1170 
1171 		edp = (struct dirent*)&buf[sizeof(buf) - uio.uio_resid];
1172 		for (dp = (struct dirent*)buf; !error && dp < edp;
1173 		     dp = (struct dirent*)((caddr_t)dp + dp->d_reclen)) {
1174 			if (dp->d_type == DT_WHT || dp->d_fileno == 0 ||
1175 			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
1176 			    (dp->d_namlen == 2 && !bcmp(dp->d_name, "..", 2)))
1177 				continue;
1178 
1179 			cn.cn_namelen = dp->d_namlen;
1180 			cn.cn_pnbuf = NULL;
1181 			cn.cn_nameptr = dp->d_name;
1182 			cn.cn_nameiop = LOOKUP;
1183 			cn.cn_flags = (LOCKPARENT | LOCKLEAF | SAVENAME | RDONLY | ISLASTCN);
1184 			cn.cn_lkflags = LK_EXCLUSIVE;
1185 			cn.cn_thread = td;
1186 			cn.cn_cred = cred;
1187 			cn.cn_consume = 0;
1188 
1189 			/*
1190 			 * check entry in lower.
1191 			 * Sometimes, readdir function returns
1192 			 * wrong entry.
1193 			 */
1194 			lookuperr = VOP_LOOKUP(lvp, &tvp, &cn);
1195 
1196 			if (!lookuperr)
1197 				vput(tvp);
1198 			else
1199 				continue; /* skip entry */
1200 
1201 			/*
1202 			 * check entry
1203 			 * If it has no exist/whiteout entry in upper,
1204 			 * directory is not empty.
1205 			 */
1206 			cn.cn_flags = (LOCKPARENT | LOCKLEAF | SAVENAME | RDONLY | ISLASTCN);
1207 			lookuperr = VOP_LOOKUP(uvp, &tvp, &cn);
1208 
1209 			if (!lookuperr)
1210 				vput(tvp);
1211 
1212 			/* ignore exist or whiteout entry */
1213 			if (!lookuperr ||
1214 			    (lookuperr == ENOENT && (cn.cn_flags & ISWHITEOUT)))
1215 				continue;
1216 
1217 			error = ENOTEMPTY;
1218 		}
1219 	}
1220 
1221 	/* close vnode */
1222 	VOP_CLOSE(vp, FREAD, cred, td);
1223 
1224 	return (error);
1225 }
1226 
1227 #ifdef DIAGNOSTIC
1228 
1229 struct vnode   *
1230 unionfs_checkuppervp(struct vnode *vp, char *fil, int lno)
1231 {
1232 	struct unionfs_node *unp;
1233 
1234 	unp = VTOUNIONFS(vp);
1235 
1236 #ifdef notyet
1237 	if (vp->v_op != unionfs_vnodeop_p) {
1238 		printf("unionfs_checkuppervp: on non-unionfs-node.\n");
1239 #ifdef KDB
1240 		kdb_enter(KDB_WHY_UNIONFS,
1241 		    "unionfs_checkuppervp: on non-unionfs-node.\n");
1242 #endif
1243 		panic("unionfs_checkuppervp");
1244 	};
1245 #endif
1246 	return (unp->un_uppervp);
1247 }
1248 
1249 struct vnode   *
1250 unionfs_checklowervp(struct vnode *vp, char *fil, int lno)
1251 {
1252 	struct unionfs_node *unp;
1253 
1254 	unp = VTOUNIONFS(vp);
1255 
1256 #ifdef notyet
1257 	if (vp->v_op != unionfs_vnodeop_p) {
1258 		printf("unionfs_checklowervp: on non-unionfs-node.\n");
1259 #ifdef KDB
1260 		kdb_enter(KDB_WHY_UNIONFS,
1261 		    "unionfs_checklowervp: on non-unionfs-node.\n");
1262 #endif
1263 		panic("unionfs_checklowervp");
1264 	};
1265 #endif
1266 	return (unp->un_lowervp);
1267 }
1268 #endif
1269