/*
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vfsops.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

/*
 * Union Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <fs/unionfs/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNION mount", "UNION mount structure");

extern vfs_init_t	union_init;
static vfs_root_t	union_root;
static vfs_nmount_t	union_mount;
static vfs_statfs_t	union_statfs;
static vfs_unmount_t	union_unmount;

/*
 * Mount union filesystem.
 */
static int
union_mount(mp, ndp, td)
	struct mount *mp;
	struct nameidata *ndp;
	struct thread *td;
{
	int error = 0;
	struct vfsoptlist *opts;
	struct vnode *lowerrootvp = NULLVP;
	struct vnode *upperrootvp = NULLVP;
	struct union_mount *um = 0;
	struct ucred *cred = 0;
	char *cp = 0, *target;
	int op;
	int len;
	size_t size;
	struct componentname fakecn;

	UDEBUG(("union_mount(mp = %p)\n", (void *)mp));

	opts = mp->mnt_optnew;
	/*
	 * Disable clustered writes; otherwise the system becomes unstable.
	 */
	mp->mnt_flag |= MNT_NOCLUSTERW;

	/*
	 * Updating an existing union mount is not supported.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		/*
		 * Need to provide:
		 * 1. a way to convert between rdonly and rdwr mounts.
		 * 2. support for nfs exports.
		 */
		return (EOPNOTSUPP);

	/*
	 * Get arguments.
	 */
	error = vfs_getopt(opts, "target", (void **)&target, &len);
	if (error || target[len - 1] != '\0')
		return (EINVAL);

	op = 0;
	if (vfs_getopt(opts, "below", NULL, NULL) == 0)
		op = UNMNT_BELOW;
	if (vfs_getopt(opts, "replace", NULL, NULL) == 0) {
		/* These options are mutually exclusive. */
		if (op)
			return (EINVAL);
		op = UNMNT_REPLACE;
	}
	/*
	 * UNMNT_ABOVE is the default.
	 */
	if (op == 0)
		op = UNMNT_ABOVE;

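	/*
	 * Illustrative only: the option names consumed above are what a
	 * userland caller passes through nmount(2).  Assuming a caller that
	 * builds the iovec list by hand (paths are made up), a "below"-style
	 * union mount would look roughly like:
	 *
	 *	struct iovec iov[] = {
	 *		{ "fstype", sizeof("fstype") }, { "unionfs", sizeof("unionfs") },
	 *		{ "fspath", sizeof("fspath") }, { "/mnt/lower", sizeof("/mnt/lower") },
	 *		{ "target", sizeof("target") }, { "/mnt/upper", sizeof("/mnt/upper") },
	 *		{ "below",  sizeof("below") },  { NULL, 0 },
	 *	};
	 *	if (nmount(iov, sizeof(iov) / sizeof(iov[0]), 0) == -1)
	 *		err(1, "nmount");
	 *
	 * "target" names the other layer; the presence of "below" or
	 * "replace" selects UNMNT_BELOW or UNMNT_REPLACE, and with neither
	 * present UNMNT_ABOVE is used.
	 */
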
	/*
	 * Obtain lower vnode.  Vnode is stored in mp->mnt_vnodecovered.
	 * We need to reference it but not lock it.
	 */

	lowerrootvp = mp->mnt_vnodecovered;
	VREF(lowerrootvp);

#if 0
	/*
	 * Unlock lower node to avoid deadlock.
	 */
	if (lowerrootvp->v_op == union_vnodeop_p)
		VOP_UNLOCK(lowerrootvp, 0, td);
#endif

	/*
	 * Obtain the upper vnode by calling namei() on the path.  The
	 * upperrootvp will be returned referenced but not locked.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT, UIO_SYSSPACE, target, td);

	error = namei(ndp);

#if 0
	if (lowerrootvp->v_op == union_vnodeop_p)
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY, td);
#endif
	if (error)
		goto bad;

	NDFREE(ndp, NDF_ONLY_PNBUF);
	upperrootvp = ndp->ni_vp;
	vrele(ndp->ni_dvp);
	ndp->ni_dvp = NULL;

	UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp,
	    VOP_ISLOCKED(upperrootvp, NULL)));

	/*
	 * Check multi union mount to avoid `lock myself again' panic.
	 * Also require that it be a directory.
	 */
	if (upperrootvp == VTOUNION(lowerrootvp)->un_uppervp) {
#ifdef DIAGNOSTIC
		printf("union_mount: multi union mount?\n");
#endif
		error = EDEADLK;
		goto bad;
	}

	if (upperrootvp->v_type != VDIR) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Allocate our union_mount structure and populate the fields.
	 * The vnode references are stored in the union_mount as held,
	 * unlocked references.  Depending on the _BELOW flag, the
	 * filesystems are viewed in a different order.  In effect this
	 * is the same as providing a mount-under option to the mount
	 * syscall.
	 */

	um = (struct union_mount *) malloc(sizeof(struct union_mount),
				M_UNIONFSMNT, M_WAITOK | M_ZERO);

	um->um_op = op;

	switch (um->um_op) {
	case UNMNT_ABOVE:
		um->um_lowervp = lowerrootvp;
		um->um_uppervp = upperrootvp;
		upperrootvp = NULL;
		lowerrootvp = NULL;
		break;

	case UNMNT_BELOW:
		um->um_lowervp = upperrootvp;
		um->um_uppervp = lowerrootvp;
		upperrootvp = NULL;
		lowerrootvp = NULL;
		break;

	case UNMNT_REPLACE:
		vrele(lowerrootvp);
		lowerrootvp = NULL;
		um->um_uppervp = upperrootvp;
		um->um_lowervp = lowerrootvp;
		upperrootvp = NULL;
		break;

	default:
		error = EINVAL;
		goto bad;
	}

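	/*
	 * Illustrative summary of the switch above: with UNMNT_ABOVE the
	 * "target" directory becomes the upper (written-to) layer and the
	 * covered directory shows through underneath it; UNMNT_BELOW swaps
	 * the two, so the covered directory stays on top; UNMNT_REPLACE
	 * drops the covered directory entirely and um_lowervp is left NULL.
	 */
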
	/*
	 * Unless the mount is readonly, ensure that the top layer
	 * supports whiteout operations.
	 */
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		/*
		 * XXX Fake up a struct componentname with only cn_nameiop
		 * and cn_thread valid; union_whiteout() needs to use the
		 * thread pointer to lock the vnode.
		 */
		bzero(&fakecn, sizeof(fakecn));
		fakecn.cn_nameiop = LOOKUP;
		fakecn.cn_thread = td;
		error = VOP_WHITEOUT(um->um_uppervp, &fakecn, LOOKUP);
		if (error)
			goto bad;
	}

	um->um_cred = crhold(td->td_ucred);
	FILEDESC_LOCK(td->td_proc->p_fd);
	um->um_cmode = UN_DIRMODE &~ td->td_proc->p_fd->fd_cmask;
	FILEDESC_UNLOCK(td->td_proc->p_fd);

	/*
	 * Depending on what you think the MNT_LOCAL flag might mean,
	 * you may want the && to be || on the conditional below.
	 * At the moment the filesystem is considered local only if all
	 * of its layers are local, i.e. the MNT_LOCAL flag implies that
	 * the entire namespace is local.  If you think MNT_LOCAL only
	 * implies that some of the files might be stored locally, you
	 * will want to change the conditional.
	 */
	if (um->um_op == UNMNT_ABOVE) {
		if (((um->um_lowervp == NULLVP) ||
		     (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
		    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
			mp->mnt_flag |= MNT_LOCAL;
	}

	/*
	 * Copy in the upper layer's RDONLY flag.  This is for the benefit
	 * of lookup(), which explicitly checks the flag rather than asking
	 * the filesystem for its own opinion.  This means that an update
	 * mount taking the underlying filesystem from rdonly to rdwr will
	 * leave the unioned view as read-only.
	 */
	mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

	mp->mnt_data = (qaddr_t) um;
	vfs_getnewfsid(mp);

	switch (um->um_op) {
	case UNMNT_ABOVE:
		cp = "<above>:";
		break;
	case UNMNT_BELOW:
		cp = "<below>:";
		break;
	case UNMNT_REPLACE:
		cp = "";
		break;
	}
	len = strlen(cp);
	bcopy(cp, mp->mnt_stat.f_mntfromname, len);

	cp = mp->mnt_stat.f_mntfromname + len;
	len = MNAMELEN - len;

	(void) copystr(target, cp, len - 1, &size);
	bzero(cp + size, len - size);

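	/*
	 * Worked example (path made up): an UNMNT_ABOVE mount whose "target"
	 * is "/var/upper" ends up with an f_mntfromname of
	 * "<above>:/var/upper", while a REPLACE mount would report just
	 * "/var/upper".
	 */
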
	(void)union_statfs(mp, &mp->mnt_stat, td);

	UDEBUG(("union_mount: from %s, on %s\n",
		mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname));
	return (0);

bad:
	if (um) {
		if (um->um_uppervp)
			vrele(um->um_uppervp);
		if (um->um_lowervp)
			vrele(um->um_lowervp);
		/* XXX other fields */
		free(um, M_UNIONFSMNT);
	}
	if (cred)
		crfree(cred);
	if (upperrootvp)
		vrele(upperrootvp);
	if (lowerrootvp)
		vrele(lowerrootvp);
	return (error);
}

/*
 * Free reference to union layer.
 */
static int
union_unmount(mp, mntflags, td)
	struct mount *mp;
	int mntflags;
	struct thread *td;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int error;
	int freeing;
	int flags = 0;

	UDEBUG(("union_unmount(mp = %p)\n", (void *)mp));

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/*
	 * Keep flushing vnodes from the mount list.
	 * This is needed because of the un_pvp held
	 * reference to the parent vnode.
	 * If more vnodes have been freed on a given pass,
	 * then try again.  The loop will iterate at most
	 * (d) times, where (d) is the maximum tree depth
	 * in the filesystem.
	 */
	for (freeing = 0; (error = vflush(mp, 0, flags)) != 0;) {
		struct vnode *vp;
		int n;

		/* count #vnodes held on mount list */
		mtx_lock(&mntvnode_mtx);
		n = 0;
		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
			n++;
		mtx_unlock(&mntvnode_mtx);

		/* if this is unchanged then stop */
		if (n == freeing)
			break;

		/* otherwise try one more time */
		freeing = n;
	}

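	/*
	 * Illustrative example: with union nodes three directories deep,
	 * each node pins its parent via un_pvp, so the first vflush() pass
	 * can only release the deepest nodes, the next pass the level above,
	 * and so on; after at most tree-depth passes either everything is
	 * gone or the vnode count stops shrinking and we give up.
	 */
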
	/*
	 * If the most recent vflush failed, the filesystem is still busy.
	 */
	if (error)
		return (error);

	/*
	 * Discard references to upper and lower target vnodes.
	 */
	if (um->um_lowervp)
		vrele(um->um_lowervp);
	vrele(um->um_uppervp);
	crfree(um->um_cred);
	/*
	 * Finally, throw away the union_mount structure.
	 */
	free(mp->mnt_data, M_UNIONFSMNT);	/* XXX */
	mp->mnt_data = 0;
	return (0);
}

static int
union_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int error;

	/*
	 * Supply an unlocked reference to um_uppervp and to um_lowervp.  It
	 * is possible for um_uppervp to be locked without the associated
	 * root union_node being locked.  We let union_allocvp() deal with
	 * it.
	 */
	UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp,
	    VOP_ISLOCKED(um->um_uppervp, NULL)));

	VREF(um->um_uppervp);
	if (um->um_lowervp)
		VREF(um->um_lowervp);

	error = union_allocvp(vpp, mp, NULLVP, NULLVP, NULL,
		    um->um_uppervp, um->um_lowervp, 1);
	UDEBUG(("error %d\n", error));
	UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp,
	    VOP_ISLOCKED(um->um_uppervp, NULL)));

	return (error);
}

static int
union_statfs(mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	int error;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct statfs mstat;
	int lbsize;

	UDEBUG(("union_statfs(mp = %p, lvp = %p, uvp = %p)\n",
	    (void *)mp, (void *)um->um_lowervp, (void *)um->um_uppervp));

	bzero(&mstat, sizeof(mstat));

	if (um->um_lowervp) {
		error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, td);
		if (error)
			return (error);
	}

	/*
	 * Now copy across the "interesting" information and fake the rest.
	 */
#if 0
	sbp->f_type = mstat.f_type;
	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;
#endif
	lbsize = mstat.f_bsize;
	sbp->f_blocks = mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files = mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, td);
	if (error)
		return (error);

	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;

	/*
	 * If the lower and upper blocksizes differ, then frig the
	 * block counts so that the sizes reported by df make some
	 * kind of sense.  None of this makes sense though.
	 */

	if (mstat.f_bsize != lbsize)
		sbp->f_blocks = ((off_t) sbp->f_blocks * lbsize) / mstat.f_bsize;

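	/*
	 * Worked example (made-up numbers): if the lower layer reported
	 * 1000 blocks of 512 bytes and the upper layer uses 2048-byte
	 * blocks, the lower count is rescaled to 1000 * 512 / 2048 = 250,
	 * so both layers are expressed in upper-layer block units before
	 * being summed below.
	 */
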
	/*
	 * The "total" fields count total resources in all layers;
	 * the "free" fields count only those resources which are
	 * free in the upper layer (since only the upper layer
	 * is writeable).
	 */
	sbp->f_blocks += mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files += mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
		bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	}
	return (0);
}

static struct vfsops union_vfsops = {
	.vfs_init =		union_init,
	.vfs_nmount =		union_mount,
	.vfs_root =		union_root,
	.vfs_statfs =		union_statfs,
	.vfs_unmount =		union_unmount,
};

VFS_SET(union_vfsops, unionfs, VFCF_LOOPBACK);
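
/*
 * VFS_SET() registers these operations under the "unionfs" filesystem name,
 * so (assuming the usual mount tooling of this era) a "mount -t unionfs ..."
 * or the nmount(2) call sketched earlier is dispatched through union_vfsops.
 */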