/*
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
36 * 37 * @(#)union_vfsops.c 8.20 (Berkeley) 5/20/95 38 * $FreeBSD$ 39 */ 40 41 /* 42 * Union Layer 43 */ 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/lock.h> 49 #include <sys/mutex.h> 50 #include <sys/proc.h> 51 #include <sys/vnode.h> 52 #include <sys/mount.h> 53 #include <sys/namei.h> 54 #include <sys/malloc.h> 55 #include <sys/filedesc.h> 56 #include <fs/unionfs/union.h> 57 58 static MALLOC_DEFINE(M_UNIONFSMNT, "UNION mount", "UNION mount structure"); 59 60 extern int union_init __P((struct vfsconf *)); 61 static int union_mount __P((struct mount *mp, char *path, caddr_t data, 62 struct nameidata *ndp, struct proc *p)); 63 static int union_root __P((struct mount *mp, struct vnode **vpp)); 64 static int union_statfs __P((struct mount *mp, struct statfs *sbp, 65 struct proc *p)); 66 static int union_unmount __P((struct mount *mp, int mntflags, 67 struct proc *p)); 68 69 /* 70 * Mount union filesystem 71 */ 72 static int 73 union_mount(mp, path, data, ndp, p) 74 struct mount *mp; 75 char *path; 76 caddr_t data; 77 struct nameidata *ndp; 78 struct proc *p; 79 { 80 int error = 0; 81 struct union_args args; 82 struct vnode *lowerrootvp = NULLVP; 83 struct vnode *upperrootvp = NULLVP; 84 struct union_mount *um = 0; 85 struct ucred *cred = 0; 86 char *cp = 0; 87 int len; 88 u_int size; 89 90 UDEBUG(("union_mount(mp = %p)\n", (void *)mp)); 91 92 /* 93 * Disable clustered write, otherwise system becomes unstable. 94 */ 95 mp->mnt_flag |= MNT_NOCLUSTERW; 96 97 /* 98 * Update is a no-op 99 */ 100 if (mp->mnt_flag & MNT_UPDATE) { 101 /* 102 * Need to provide. 103 * 1. a way to convert between rdonly and rdwr mounts. 104 * 2. support for nfs exports. 105 */ 106 error = EOPNOTSUPP; 107 goto bad; 108 } 109 110 /* 111 * Get argument 112 */ 113 error = copyin(data, (caddr_t)&args, sizeof(struct union_args)); 114 if (error) 115 goto bad; 116 117 /* 118 * Obtain lower vnode. Vnode is stored in mp->mnt_vnodecovered. 
119 * We need to reference it but not lock it. 120 */ 121 122 lowerrootvp = mp->mnt_vnodecovered; 123 VREF(lowerrootvp); 124 125 #if 0 126 /* 127 * Unlock lower node to avoid deadlock. 128 */ 129 if (lowerrootvp->v_op == union_vnodeop_p) 130 VOP_UNLOCK(lowerrootvp, 0, p); 131 #endif 132 133 /* 134 * Obtain upper vnode by calling namei() on the path. The 135 * upperrootvp will be turned referenced but not locked. 136 */ 137 NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT, 138 UIO_USERSPACE, args.target, p); 139 140 error = namei(ndp); 141 142 #if 0 143 if (lowerrootvp->v_op == union_vnodeop_p) 144 vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY, p); 145 #endif 146 if (error) 147 goto bad; 148 149 NDFREE(ndp, NDF_ONLY_PNBUF); 150 upperrootvp = ndp->ni_vp; 151 vrele(ndp->ni_dvp); 152 ndp->ni_dvp = NULL; 153 154 UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp, 155 VOP_ISLOCKED(upperrootvp, NULL))); 156 157 /* 158 * Check multi union mount to avoid `lock myself again' panic. 159 * Also require that it be a directory. 160 */ 161 if (upperrootvp == VTOUNION(lowerrootvp)->un_uppervp) { 162 #ifdef DIAGNOSTIC 163 printf("union_mount: multi union mount?\n"); 164 #endif 165 error = EDEADLK; 166 goto bad; 167 } 168 169 if (upperrootvp->v_type != VDIR) { 170 error = EINVAL; 171 goto bad; 172 } 173 174 /* 175 * Allocate our union_mount structure and populate the fields. 176 * The vnode references are stored in the union_mount as held, 177 * unlocked references. Depending on the _BELOW flag, the 178 * filesystems are viewed in a different order. In effect this 179 * is the same as providing a mount-under option to the mount 180 * syscall. 
181 */ 182 183 um = (struct union_mount *) malloc(sizeof(struct union_mount), 184 M_UNIONFSMNT, M_WAITOK | M_ZERO); 185 186 um->um_op = args.mntflags & UNMNT_OPMASK; 187 188 switch (um->um_op) { 189 case UNMNT_ABOVE: 190 um->um_lowervp = lowerrootvp; 191 um->um_uppervp = upperrootvp; 192 upperrootvp = NULL; 193 lowerrootvp = NULL; 194 break; 195 196 case UNMNT_BELOW: 197 um->um_lowervp = upperrootvp; 198 um->um_uppervp = lowerrootvp; 199 upperrootvp = NULL; 200 lowerrootvp = NULL; 201 break; 202 203 case UNMNT_REPLACE: 204 vrele(lowerrootvp); 205 lowerrootvp = NULL; 206 um->um_uppervp = upperrootvp; 207 um->um_lowervp = lowerrootvp; 208 upperrootvp = NULL; 209 break; 210 211 default: 212 error = EINVAL; 213 goto bad; 214 } 215 216 /* 217 * Unless the mount is readonly, ensure that the top layer 218 * supports whiteout operations 219 */ 220 if ((mp->mnt_flag & MNT_RDONLY) == 0) { 221 error = VOP_WHITEOUT(um->um_uppervp, NULL, LOOKUP); 222 if (error) 223 goto bad; 224 } 225 226 um->um_cred = p->p_ucred; 227 crhold(um->um_cred); 228 um->um_cmode = UN_DIRMODE &~ p->p_fd->fd_cmask; 229 230 /* 231 * Depending on what you think the MNT_LOCAL flag might mean, 232 * you may want the && to be || on the conditional below. 233 * At the moment it has been defined that the filesystem is 234 * only local if it is all local, ie the MNT_LOCAL flag implies 235 * that the entire namespace is local. If you think the MNT_LOCAL 236 * flag implies that some of the files might be stored locally 237 * then you will want to change the conditional. 238 */ 239 if (um->um_op == UNMNT_ABOVE) { 240 if (((um->um_lowervp == NULLVP) || 241 (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) && 242 (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL)) 243 mp->mnt_flag |= MNT_LOCAL; 244 } 245 246 /* 247 * Copy in the upper layer's RDONLY flag. This is for the benefit 248 * of lookup() which explicitly checks the flag, rather than asking 249 * the filesystem for its own opinion. 
This means, that an update 250 * mount of the underlying filesystem to go from rdonly to rdwr 251 * will leave the unioned view as read-only. 252 */ 253 mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY); 254 255 mp->mnt_data = (qaddr_t) um; 256 vfs_getnewfsid(mp); 257 258 switch (um->um_op) { 259 case UNMNT_ABOVE: 260 cp = "<above>:"; 261 break; 262 case UNMNT_BELOW: 263 cp = "<below>:"; 264 break; 265 case UNMNT_REPLACE: 266 cp = ""; 267 break; 268 } 269 len = strlen(cp); 270 bcopy(cp, mp->mnt_stat.f_mntfromname, len); 271 272 cp = mp->mnt_stat.f_mntfromname + len; 273 len = MNAMELEN - len; 274 275 (void) copyinstr(args.target, cp, len - 1, &size); 276 bzero(cp + size, len - size); 277 278 (void)union_statfs(mp, &mp->mnt_stat, p); 279 280 UDEBUG(("union_mount: from %s, on %s\n", 281 mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname)); 282 return (0); 283 284 bad: 285 if (um) { 286 if (um->um_uppervp) 287 vrele(um->um_uppervp); 288 if (um->um_lowervp) 289 vrele(um->um_lowervp); 290 /* XXX other fields */ 291 free(um, M_UNIONFSMNT); 292 } 293 if (cred) 294 crfree(cred); 295 if (upperrootvp) 296 vrele(upperrootvp); 297 if (lowerrootvp) 298 vrele(lowerrootvp); 299 return (error); 300 } 301 302 /* 303 * Free reference to union layer 304 */ 305 static int 306 union_unmount(mp, mntflags, p) 307 struct mount *mp; 308 int mntflags; 309 struct proc *p; 310 { 311 struct union_mount *um = MOUNTTOUNIONMOUNT(mp); 312 int error; 313 int freeing; 314 int flags = 0; 315 316 UDEBUG(("union_unmount(mp = %p)\n", (void *)mp)); 317 318 if (mntflags & MNT_FORCE) 319 flags |= FORCECLOSE; 320 321 /* 322 * Keep flushing vnodes from the mount list. 323 * This is needed because of the un_pvp held 324 * reference to the parent vnode. 325 * If more vnodes have been freed on a given pass, 326 * the try again. The loop will iterate at most 327 * (d) times, where (d) is the maximum tree depth 328 * in the filesystem. 
329 */ 330 for (freeing = 0; (error = vflush(mp, 0, flags)) != 0;) { 331 struct vnode *vp; 332 int n; 333 334 /* count #vnodes held on mount list */ 335 mtx_lock(&mntvnode_mtx); 336 n = 0; 337 LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) 338 n++; 339 mtx_unlock(&mntvnode_mtx); 340 341 /* if this is unchanged then stop */ 342 if (n == freeing) 343 break; 344 345 /* otherwise try once more time */ 346 freeing = n; 347 } 348 349 /* If the most recent vflush failed, the filesystem is still busy. */ 350 if (error) 351 return (error); 352 353 /* 354 * Discard references to upper and lower target vnodes. 355 */ 356 if (um->um_lowervp) 357 vrele(um->um_lowervp); 358 vrele(um->um_uppervp); 359 crfree(um->um_cred); 360 /* 361 * Finally, throw away the union_mount structure 362 */ 363 free(mp->mnt_data, M_UNIONFSMNT); /* XXX */ 364 mp->mnt_data = 0; 365 return (0); 366 } 367 368 static int 369 union_root(mp, vpp) 370 struct mount *mp; 371 struct vnode **vpp; 372 { 373 struct union_mount *um = MOUNTTOUNIONMOUNT(mp); 374 int error; 375 376 /* 377 * Supply an unlocked reference to um_uppervp and to um_lowervp. It 378 * is possible for um_uppervp to be locked without the associated 379 * root union_node being locked. We let union_allocvp() deal with 380 * it. 
381 */ 382 UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp, 383 VOP_ISLOCKED(um->um_uppervp, NULL))); 384 385 VREF(um->um_uppervp); 386 if (um->um_lowervp) 387 VREF(um->um_lowervp); 388 389 error = union_allocvp(vpp, mp, NULLVP, NULLVP, NULL, 390 um->um_uppervp, um->um_lowervp, 1); 391 UDEBUG(("error %d\n", error)); 392 UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp, 393 VOP_ISLOCKED(um->um_uppervp, NULL))); 394 395 return (error); 396 } 397 398 static int 399 union_statfs(mp, sbp, p) 400 struct mount *mp; 401 struct statfs *sbp; 402 struct proc *p; 403 { 404 int error; 405 struct union_mount *um = MOUNTTOUNIONMOUNT(mp); 406 struct statfs mstat; 407 int lbsize; 408 409 UDEBUG(("union_statfs(mp = %p, lvp = %p, uvp = %p)\n", 410 (void *)mp, (void *)um->um_lowervp, (void *)um->um_uppervp)); 411 412 bzero(&mstat, sizeof(mstat)); 413 414 if (um->um_lowervp) { 415 error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, p); 416 if (error) 417 return (error); 418 } 419 420 /* now copy across the "interesting" information and fake the rest */ 421 #if 0 422 sbp->f_type = mstat.f_type; 423 sbp->f_flags = mstat.f_flags; 424 sbp->f_bsize = mstat.f_bsize; 425 sbp->f_iosize = mstat.f_iosize; 426 #endif 427 lbsize = mstat.f_bsize; 428 sbp->f_blocks = mstat.f_blocks; 429 sbp->f_bfree = mstat.f_bfree; 430 sbp->f_bavail = mstat.f_bavail; 431 sbp->f_files = mstat.f_files; 432 sbp->f_ffree = mstat.f_ffree; 433 434 error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, p); 435 if (error) 436 return (error); 437 438 sbp->f_flags = mstat.f_flags; 439 sbp->f_bsize = mstat.f_bsize; 440 sbp->f_iosize = mstat.f_iosize; 441 442 /* 443 * if the lower and upper blocksizes differ, then frig the 444 * block counts so that the sizes reported by df make some 445 * kind of sense. none of this makes sense though. 
446 */ 447 448 if (mstat.f_bsize != lbsize) 449 sbp->f_blocks = ((off_t) sbp->f_blocks * lbsize) / mstat.f_bsize; 450 451 /* 452 * The "total" fields count total resources in all layers, 453 * the "free" fields count only those resources which are 454 * free in the upper layer (since only the upper layer 455 * is writeable). 456 */ 457 sbp->f_blocks += mstat.f_blocks; 458 sbp->f_bfree = mstat.f_bfree; 459 sbp->f_bavail = mstat.f_bavail; 460 sbp->f_files += mstat.f_files; 461 sbp->f_ffree = mstat.f_ffree; 462 463 if (sbp != &mp->mnt_stat) { 464 sbp->f_type = mp->mnt_vfc->vfc_typenum; 465 bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); 466 bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); 467 bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); 468 } 469 return (0); 470 } 471 472 static struct vfsops union_vfsops = { 473 union_mount, 474 vfs_stdstart, /* underlying start already done */ 475 union_unmount, 476 union_root, 477 vfs_stdquotactl, 478 union_statfs, 479 vfs_stdsync, /* XXX assumes no cached data on union level */ 480 vfs_stdvget, 481 vfs_stdfhtovp, 482 vfs_stdcheckexp, 483 vfs_stdvptofh, 484 union_init, 485 vfs_stduninit, 486 vfs_stdextattrctl, 487 }; 488 489 VFS_SET(union_vfsops, unionfs, VFCF_LOOPBACK); 490