/*
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vfsops.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

/*
 * Union Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <miscfs/union/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNION mount", "UNION mount structure");

extern int	union_init __P((struct vfsconf *));
static int	union_mount __P((struct mount *mp, char *path, caddr_t data,
				 struct nameidata *ndp, struct proc *p));
static int	union_root __P((struct mount *mp, struct vnode **vpp));
static int	union_statfs __P((struct mount *mp, struct statfs *sbp,
				  struct proc *p));
static int	union_unmount __P((struct mount *mp, int mntflags,
				   struct proc *p));

/*
 * Mount union filesystem
 */
static int
union_mount(mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	int error = 0;
	struct union_args args;
	struct vnode *lowerrootvp = NULLVP;
	struct vnode *upperrootvp = NULLVP;
	struct union_mount *um = 0;
	struct ucred *cred = 0;
	char *cp = 0;
	int len;
	u_int size;

	UDEBUG(("union_mount(mp = %p)\n", (void *)mp));

	/*
	 * Disable clustered writes; otherwise the system becomes unstable.
	 */
	mp->mnt_flag |= MNT_NOCLUSTERW;

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Need to provide:
		 * 1. a way to convert between rdonly and rdwr mounts.
		 * 2. support for nfs exports.
		 */
		error = EOPNOTSUPP;
		goto bad;
	}

	/*
	 * Get the mount arguments.
	 */
	error = copyin(data, (caddr_t)&args, sizeof(struct union_args));
	if (error)
		goto bad;

	/*
	 * Obtain lower vnode.  Vnode is stored in mp->mnt_vnodecovered.
	 * We need to reference it but not lock it.
	 */

	lowerrootvp = mp->mnt_vnodecovered;
	VREF(lowerrootvp);

#if 0
	/*
	 * Unlock lower node to avoid deadlock.
	 */
	if (lowerrootvp->v_op == union_vnodeop_p)
		VOP_UNLOCK(lowerrootvp, 0, p);
#endif

	/*
	 * Obtain upper vnode by calling namei() on the path.  The
	 * upperrootvp will be returned referenced but not locked.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT,
	       UIO_USERSPACE, args.target, p);

	error = namei(ndp);

#if 0
	if (lowerrootvp->v_op == union_vnodeop_p)
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY, p);
#endif
	if (error)
		goto bad;

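	/*
	 * namei() was started with WANTPARENT, so both the target vnode
	 * and its parent directory come back referenced.  Only the target
	 * is needed; drop the pathname buffer and the parent reference.
	 */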
	NDFREE(ndp, NDF_ONLY_PNBUF);
	upperrootvp = ndp->ni_vp;
	vrele(ndp->ni_dvp);
	ndp->ni_dvp = NULL;

	UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp,
	    VOP_ISLOCKED(upperrootvp, NULL)));

	/*
	 * Check multi union mount to avoid `lock myself again' panic.
	 * Also require that it be a directory.
	 */
	if (upperrootvp == VTOUNION(lowerrootvp)->un_uppervp) {
#ifdef DIAGNOSTIC
		printf("union_mount: multi union mount?\n");
#endif
		error = EDEADLK;
		goto bad;
	}

	if (upperrootvp->v_type != VDIR) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Allocate our union_mount structure and populate the fields.
	 * The vnode references are stored in the union_mount as held,
	 * unlocked references.  Depending on the _BELOW flag, the
	 * filesystems are viewed in a different order.  In effect this
	 * is the same as providing a mount-under option to the mount
	 * syscall.
	 */

	um = (struct union_mount *) malloc(sizeof(struct union_mount),
				M_UNIONFSMNT, M_WAITOK);

	bzero(um, sizeof(struct union_mount));

	um->um_op = args.mntflags & UNMNT_OPMASK;

	switch (um->um_op) {
	case UNMNT_ABOVE:
		um->um_lowervp = lowerrootvp;
		um->um_uppervp = upperrootvp;
		upperrootvp = NULL;
		lowerrootvp = NULL;
		break;

	case UNMNT_BELOW:
		um->um_lowervp = upperrootvp;
		um->um_uppervp = lowerrootvp;
		upperrootvp = NULL;
		lowerrootvp = NULL;
		break;

	case UNMNT_REPLACE:
		vrele(lowerrootvp);
		lowerrootvp = NULL;
		um->um_uppervp = upperrootvp;
		um->um_lowervp = lowerrootvp;
		upperrootvp = NULL;
		break;

	default:
		error = EINVAL;
		goto bad;
	}

	/*
	 * Unless the mount is readonly, ensure that the top layer
	 * supports whiteout operations.
	 */
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		error = VOP_WHITEOUT(um->um_uppervp, NULL, LOOKUP);
		if (error)
			goto bad;
	}

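	/*
	 * Record the mounting user's credentials, and derive a default
	 * creation mode from UN_DIRMODE with the process umask applied,
	 * for directories unionfs creates in the upper layer.
	 */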
	um->um_cred = p->p_ucred;
	crhold(um->um_cred);
	um->um_cmode = UN_DIRMODE &~ p->p_fd->fd_cmask;

	/*
	 * Depending on what you think the MNT_LOCAL flag might mean,
	 * you may want the && to be || on the conditional below.
	 * At the moment it has been defined that the filesystem is
	 * only local if it is all local, i.e. the MNT_LOCAL flag implies
	 * that the entire namespace is local.  If you think the MNT_LOCAL
	 * flag implies that some of the files might be stored locally
	 * then you will want to change the conditional.
	 */
	if (um->um_op == UNMNT_ABOVE) {
		if (((um->um_lowervp == NULLVP) ||
		     (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
		    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
			mp->mnt_flag |= MNT_LOCAL;
	}

	/*
	 * Copy in the upper layer's RDONLY flag.  This is for the benefit
	 * of lookup() which explicitly checks the flag, rather than asking
	 * the filesystem for its own opinion.  This means that an update
	 * mount of the underlying filesystem from rdonly to rdwr will
	 * leave the unioned view as read-only.
	 */
	mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

	mp->mnt_data = (qaddr_t) um;
	vfs_getnewfsid(mp);

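	/*
	 * Build the names reported by statfs(): f_mntonname is the covered
	 * mount point path, and f_mntfromname is an "<above>:" or "<below>:"
	 * prefix (empty for replace mounts) followed by the user-supplied
	 * upper target path.
	 */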
	(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);

	switch (um->um_op) {
	case UNMNT_ABOVE:
		cp = "<above>:";
		break;
	case UNMNT_BELOW:
		cp = "<below>:";
		break;
	case UNMNT_REPLACE:
		cp = "";
		break;
	}
	len = strlen(cp);
	bcopy(cp, mp->mnt_stat.f_mntfromname, len);

	cp = mp->mnt_stat.f_mntfromname + len;
	len = MNAMELEN - len;

	(void) copyinstr(args.target, cp, len - 1, &size);
	bzero(cp + size, len - size);

	(void)union_statfs(mp, &mp->mnt_stat, p);

	UDEBUG(("union_mount: from %s, on %s\n",
		mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname));
	return (0);

bad:
	if (um) {
		if (um->um_uppervp)
			vrele(um->um_uppervp);
		if (um->um_lowervp)
			vrele(um->um_lowervp);
		/* XXX other fields */
		free(um, M_UNIONFSMNT);
	}
	if (cred)
		crfree(cred);
	if (upperrootvp)
		vrele(upperrootvp);
	if (lowerrootvp)
		vrele(lowerrootvp);
	return (error);
}

/*
 * Free reference to union layer
 */
static int
union_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct vnode *um_rootvp;
	int error;
	int freeing;
	int flags = 0;

	UDEBUG(("union_unmount(mp = %p)\n", (void *)mp));

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

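	/*
	 * Obtain a reference to the union root vnode; vflush() below is
	 * told to skip it, and it is released (vput) once the flush and
	 * cleanup are complete.
	 */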
	if ((error = union_root(mp, &um_rootvp)) != 0)
		return (error);

	/*
	 * Keep flushing vnodes from the mount list.
	 * This is needed because of the un_pvp held
	 * reference to the parent vnode.
	 * If more vnodes have been freed on a given pass,
	 * then try again.  The loop will iterate at most
	 * (d) times, where (d) is the maximum tree depth
	 * in the filesystem.
	 */
	for (freeing = 0; vflush(mp, um_rootvp, flags) != 0;) {
		struct vnode *vp;
		int n;

		/* count #vnodes held on mount list */
		for (n = 0, vp = mp->mnt_vnodelist.lh_first;
				vp != NULLVP;
				vp = vp->v_mntvnodes.le_next)
			n++;

		/* if this is unchanged then stop */
		if (n == freeing)
			break;

		/* otherwise try one more time */
		freeing = n;
	}

	/* At this point the root vnode should have a single reference */
	if (um_rootvp->v_usecount > 1) {
		vput(um_rootvp);
		return (EBUSY);
	}

#ifdef DEBUG
	vprint("union root", um_rootvp);
#endif
	/*
	 * Discard references to upper and lower target vnodes.
	 */
	if (um->um_lowervp)
		vrele(um->um_lowervp);
	vrele(um->um_uppervp);
	crfree(um->um_cred);
	/*
	 * Release reference on underlying root vnode
	 */
	vput(um_rootvp);
	/*
	 * And blow it away for future re-use
	 */
	vgone(um_rootvp);
	/*
	 * Finally, throw away the union_mount structure
	 */
	free(mp->mnt_data, M_UNIONFSMNT);	/* XXX */
	mp->mnt_data = 0;
	return (0);
}

static int
union_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int error;

	/*
	 * Supply an unlocked reference to um_uppervp and to um_lowervp.  It
	 * is possible for um_uppervp to be locked without the associated
	 * root union_node being locked.  We let union_allocvp() deal with
	 * it.
	 */
	UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp,
	    VOP_ISLOCKED(um->um_uppervp, NULL)));

	VREF(um->um_uppervp);
	if (um->um_lowervp)
		VREF(um->um_lowervp);

	error = union_allocvp(vpp, mp, NULLVP, NULLVP, NULL,
		    um->um_uppervp, um->um_lowervp, 1);
	UDEBUG(("error %d\n", error));
	UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp,
	    VOP_ISLOCKED(um->um_uppervp, NULL)));

	return (error);
}

static int
union_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	int error;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct statfs mstat;
	int lbsize;

	UDEBUG(("union_statfs(mp = %p, lvp = %p, uvp = %p)\n",
	    (void *)mp, (void *)um->um_lowervp, (void *)um->um_uppervp));

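	/*
	 * Zero mstat first so the lower-layer fields read as zero when
	 * there is no lower vnode (replace mounts have only an upper layer).
	 */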
	bzero(&mstat, sizeof(mstat));

	if (um->um_lowervp) {
		error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, p);
		if (error)
			return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
#if 0
	sbp->f_type = mstat.f_type;
	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;
#endif
	lbsize = mstat.f_bsize;
	sbp->f_blocks = mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files = mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, p);
	if (error)
		return (error);

	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;

	/*
	 * If the lower and upper block sizes differ, then frig the
	 * block counts so that the sizes reported by df make some
	 * kind of sense.  None of this makes sense though.
	 */

	if (mstat.f_bsize != lbsize)
		sbp->f_blocks = ((off_t) sbp->f_blocks * lbsize) / mstat.f_bsize;

	/*
	 * The "total" fields count total resources in all layers,
	 * the "free" fields count only those resources which are
	 * free in the upper layer (since only the upper layer
	 * is writeable).
	 */
	sbp->f_blocks += mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files += mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
		bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	}
	return (0);
}

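/*
 * Union-specific mount, unmount, root and statfs operations; everything
 * else falls through to the vfs_std* defaults.
 */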
static struct vfsops union_vfsops = {
	union_mount,
	vfs_stdstart,	/* underlying start already done */
	union_unmount,
	union_root,
	vfs_stdquotactl,
	union_statfs,
	vfs_stdsync,    /* XXX assumes no cached data on union level */
	vfs_stdvget,
	vfs_stdfhtovp,
	vfs_stdcheckexp,
	vfs_stdvptofh,
	union_init,
	vfs_stduninit,
	vfs_stdextattrctl,
};

VFS_SET(union_vfsops, union, VFCF_LOOPBACK);