/*
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vfsops.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

/*
 * Union Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <miscfs/union/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNION mount", "UNION mount structure");

extern int	union_init __P((struct vfsconf *));
static int	union_mount __P((struct mount *mp, char *path, caddr_t data,
				 struct nameidata *ndp, struct proc *p));
static int	union_root __P((struct mount *mp, struct vnode **vpp));
static int	union_statfs __P((struct mount *mp, struct statfs *sbp,
				  struct proc *p));
static int	union_unmount __P((struct mount *mp, int mntflags,
				   struct proc *p));

/*
 * Mount union filesystem.
 */
static int
union_mount(mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	int error = 0;
	struct union_args args;
	struct vnode *lowerrootvp = NULLVP;
	struct vnode *upperrootvp = NULLVP;
	struct union_mount *um = 0;
	struct ucred *cred = 0;
	char *cp = 0;
	int len;
	u_int size;

	UDEBUG(("union_mount(mp = %p)\n", (void *)mp));
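	/*
	 * For orientation, a rough userland sketch of how this entry
	 * point is reached (illustrative only; the paths here are
	 * hypothetical and mount_union(8) is the usual front end):
	 *
	 *	struct union_args args;
	 *
	 *	args.target = "/upper";		-- becomes the upper layer
	 *	args.mntflags = UNMNT_ABOVE;	-- or UNMNT_BELOW/UNMNT_REPLACE
	 *	mount("union", "/lower", 0, (caddr_t)&args);
	 */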
	/*
	 * Disable clustered writes; otherwise the system becomes unstable.
	 */
	mp->mnt_flag |= MNT_NOCLUSTERW;

	/*
	 * Update is a no-op.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Need to provide:
		 * 1. a way to convert between rdonly and rdwr mounts.
		 * 2. support for nfs exports.
		 */
		error = EOPNOTSUPP;
		goto bad;
	}

	/*
	 * Get the mount arguments.
	 */
	error = copyin(data, (caddr_t)&args, sizeof(struct union_args));
	if (error)
		goto bad;

	/*
	 * Obtain the lower vnode.  It is stored in mp->mnt_vnodecovered.
	 * We need to reference it but not lock it.
	 */

	lowerrootvp = mp->mnt_vnodecovered;
	VREF(lowerrootvp);

#if 0
	/*
	 * Unlock lower node to avoid deadlock.
	 */
	if (lowerrootvp->v_op == union_vnodeop_p)
		VOP_UNLOCK(lowerrootvp, 0, p);
#endif

	/*
	 * Obtain the upper vnode by calling namei() on the path.  The
	 * upperrootvp will be returned referenced but not locked.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT,
	       UIO_USERSPACE, args.target, p);

	error = namei(ndp);

#if 0
	if (lowerrootvp->v_op == union_vnodeop_p)
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY, p);
#endif
	if (error)
		goto bad;

	upperrootvp = ndp->ni_vp;
	vrele(ndp->ni_dvp);
	ndp->ni_dvp = NULL;

	UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp,
	    VOP_ISLOCKED(upperrootvp)));

	/*
	 * Check for a multi union mount to avoid a `lock myself again'
	 * panic.  Also require that the upper layer be a directory.
	 */
	if (upperrootvp == VTOUNION(lowerrootvp)->un_uppervp) {
#ifdef DIAGNOSTIC
		printf("union_mount: multi union mount?\n");
#endif
		error = EDEADLK;
		goto bad;
	}

	if (upperrootvp->v_type != VDIR) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Allocate our union_mount structure and populate the fields.
	 * The vnode references are stored in the union_mount as held,
	 * unlocked references.  Depending on the _BELOW flag, the
	 * filesystems are viewed in a different order.  In effect this
	 * is the same as providing a mount-under option to the mount
	 * syscall.
	 */

	um = (struct union_mount *) malloc(sizeof(struct union_mount),
	    M_UNIONFSMNT, M_WAITOK);

	bzero(um, sizeof(struct union_mount));

	um->um_op = args.mntflags & UNMNT_OPMASK;

	switch (um->um_op) {
	case UNMNT_ABOVE:
		um->um_lowervp = lowerrootvp;
		um->um_uppervp = upperrootvp;
		upperrootvp = NULL;
		lowerrootvp = NULL;
		break;

	case UNMNT_BELOW:
		um->um_lowervp = upperrootvp;
		um->um_uppervp = lowerrootvp;
		upperrootvp = NULL;
		lowerrootvp = NULL;
		break;

	case UNMNT_REPLACE:
		vrele(lowerrootvp);
		lowerrootvp = NULL;
		um->um_uppervp = upperrootvp;
		um->um_lowervp = lowerrootvp;
		upperrootvp = NULL;
		break;

	default:
		error = EINVAL;
		goto bad;
	}

	/*
	 * Unless the mount is readonly, ensure that the top layer
	 * supports whiteout operations.
	 */
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		error = VOP_WHITEOUT(um->um_uppervp, NULL, LOOKUP);
		if (error)
			goto bad;
	}

	um->um_cred = p->p_ucred;
	crhold(um->um_cred);
	um->um_cmode = UN_DIRMODE &~ p->p_fd->fd_cmask;
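	/*
	 * Worked example of the mode computation above (assuming the
	 * customary UN_DIRMODE of 0777): with a process cmask of 022,
	 * um_cmode becomes 0777 & ~022 == 0755, so shadow directories
	 * created in the upper layer get mode rwxr-xr-x.
	 */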
	/*
	 * Depending on what you think the MNT_LOCAL flag might mean,
	 * you may want the && to be || on the conditional below.
	 * At the moment it has been defined that the filesystem is
	 * only local if it is all local, i.e. the MNT_LOCAL flag implies
	 * that the entire namespace is local.  If you think the MNT_LOCAL
	 * flag implies that some of the files might be stored locally
	 * then you will want to change the conditional.
	 */
	if (um->um_op == UNMNT_ABOVE) {
		if (((um->um_lowervp == NULLVP) ||
		    (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
		    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
			mp->mnt_flag |= MNT_LOCAL;
	}

	/*
	 * Copy in the upper layer's RDONLY flag.  This is for the benefit
	 * of lookup() which explicitly checks the flag, rather than asking
	 * the filesystem for its own opinion.  This means that an update
	 * mount of the underlying filesystem to go from rdonly to rdwr
	 * will leave the unioned view as read-only.
	 */
	mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

	mp->mnt_data = (qaddr_t) um;
	vfs_getnewfsid(mp);

	(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);

	switch (um->um_op) {
	case UNMNT_ABOVE:
		cp = "<above>:";
		break;
	case UNMNT_BELOW:
		cp = "<below>:";
		break;
	case UNMNT_REPLACE:
		cp = "";
		break;
	}
	len = strlen(cp);
	bcopy(cp, mp->mnt_stat.f_mntfromname, len);

	cp = mp->mnt_stat.f_mntfromname + len;
	len = MNAMELEN - len;

	(void) copyinstr(args.target, cp, len - 1, &size);
	bzero(cp + size, len - size);

	(void)union_statfs(mp, &mp->mnt_stat, p);

	UDEBUG(("union_mount: from %s, on %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname));
	return (0);

bad:
	if (um) {
		if (um->um_uppervp)
			vrele(um->um_uppervp);
		if (um->um_lowervp)
			vrele(um->um_lowervp);
		/* XXX other fields */
		free(um, M_UNIONFSMNT);
	}
	if (cred)
		crfree(cred);
	if (upperrootvp)
		vrele(upperrootvp);
	if (lowerrootvp)
		vrele(lowerrootvp);
	return (error);
}

/*
 * Free reference to union layer.
 */
static int
union_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct vnode *um_rootvp;
	int error;
	int freeing;
	int flags = 0;

	UDEBUG(("union_unmount(mp = %p)\n", (void *)mp));

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if ((error = union_root(mp, &um_rootvp)) != 0)
		return (error);

	/*
	 * Keep flushing vnodes from the mount list.
	 * This is needed because of the un_pvp held
	 * reference to the parent vnode.
	 * If more vnodes have been freed on a given pass,
	 * then try again.  The loop will iterate at most
	 * (d) times, where (d) is the maximum tree depth
	 * in the filesystem.
	 */
	for (freeing = 0; vflush(mp, um_rootvp, flags) != 0;) {
		struct vnode *vp;
		int n;

		/* count #vnodes held on mount list */
		for (n = 0, vp = mp->mnt_vnodelist.lh_first;
		    vp != NULLVP;
		    vp = vp->v_mntvnodes.le_next)
			n++;

		/* if this is unchanged then stop */
		if (n == freeing)
			break;

		/* otherwise try one more time */
		freeing = n;
	}
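	/*
	 * Illustrative trace of the loop above (counts hypothetical):
	 * pass 1 leaves 12 vnodes because children pin their parents via
	 * the un_pvp reference; pass 2 leaves 4; pass 3 leaves 1 (the
	 * root).  Each pass can release the deepest remaining layer, so
	 * the loop gives up only when two consecutive passes leave the
	 * same count.
	 */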
	/* At this point the root vnode should have a single reference. */
	if (um_rootvp->v_usecount > 1) {
		vput(um_rootvp);
		return (EBUSY);
	}

#ifdef DEBUG
	vprint("union root", um_rootvp);
#endif
	/*
	 * Discard references to upper and lower target vnodes.
	 */
	if (um->um_lowervp)
		vrele(um->um_lowervp);
	vrele(um->um_uppervp);
	crfree(um->um_cred);
	/*
	 * Release reference on underlying root vnode.
	 */
	vput(um_rootvp);
	/*
	 * And blow it away for future re-use.
	 */
	vgone(um_rootvp);
	/*
	 * Finally, throw away the union_mount structure.
	 */
	free(mp->mnt_data, M_UNIONFSMNT);	/* XXX */
	mp->mnt_data = 0;
	return (0);
}

static int
union_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int error;

	/*
	 * Supply an unlocked reference to um_uppervp and to um_lowervp.  It
	 * is possible for um_uppervp to be locked without the associated
	 * root union_node being locked.  We let union_allocvp() deal with
	 * it.
	 */
	UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp,
	    VOP_ISLOCKED(um->um_uppervp)));

	VREF(um->um_uppervp);
	if (um->um_lowervp)
		VREF(um->um_lowervp);

	error = union_allocvp(vpp, mp, NULLVP, NULLVP, NULL,
	    um->um_uppervp, um->um_lowervp, 1);
	UDEBUG(("error %d\n", error));
	UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp,
	    VOP_ISLOCKED(um->um_uppervp)));

	return (error);
}

static int
union_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	int error;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct statfs mstat;
	int lbsize;

	UDEBUG(("union_statfs(mp = %p, lvp = %p, uvp = %p)\n",
	    (void *)mp, (void *)um->um_lowervp, (void *)um->um_uppervp));

	bzero(&mstat, sizeof(mstat));

	if (um->um_lowervp) {
		error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, p);
		if (error)
			return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
#if 0
	sbp->f_type = mstat.f_type;
	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;
#endif
	lbsize = mstat.f_bsize;
	sbp->f_blocks = mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files = mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, p);
	if (error)
		return (error);

	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;

	/*
	 * If the lower and upper blocksizes differ, then frig the
	 * block counts so that the sizes reported by df make some
	 * kind of sense.  None of this makes sense though.
	 */

	if (mstat.f_bsize != lbsize)
		sbp->f_blocks = ((off_t) sbp->f_blocks * lbsize) /
		    mstat.f_bsize;
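	/*
	 * Worked example of the rescaling above (numbers hypothetical):
	 * if the lower layer reported 1000 blocks of 512 bytes and the
	 * upper layer uses 1024-byte blocks, the lower count is rescaled
	 * to 1000 * 512 / 1024 == 500 upper-sized blocks before the two
	 * totals are summed below.
	 */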
	/*
	 * The "total" fields count total resources in all layers,
	 * the "free" fields count only those resources which are
	 * free in the upper layer (since only the upper layer
	 * is writeable).
	 */
	sbp->f_blocks += mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files += mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
		bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	}
	return (0);
}

static struct vfsops union_vfsops = {
	union_mount,
	vfs_stdstart,	/* underlying start already done */
	union_unmount,
	union_root,
	vfs_stdquotactl,
	union_statfs,
	vfs_stdsync,	/* XXX assumes no cached data on union level */
	vfs_stdvget,
	vfs_stdfhtovp,
	vfs_stdcheckexp,
	vfs_stdvptofh,
	union_init,
};

VFS_SET(union_vfsops, union, VFCF_LOOPBACK);
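
/*
 * Illustrative only: with the filesystem registered by VFS_SET above,
 * a union view can be assembled from userland roughly as follows
 * (paths hypothetical; see mount_union(8) for the real interface):
 *
 *	mount_union /upper /lower	-- /lower now shows both layers
 *	df /lower			-- sizes merged by union_statfs()
 */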