/*
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vfsops.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

/*
 * Union Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>

#include <fs/unionfs/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNION mount", "UNION mount structure");

extern int	union_init(struct vfsconf *);
static int	union_mount(struct mount *mp, char *path, caddr_t data,
		    struct nameidata *ndp, struct thread *td);
static int	union_root(struct mount *mp, struct vnode **vpp);
static int	union_statfs(struct mount *mp, struct statfs *sbp,
		    struct thread *td);
static int	union_unmount(struct mount *mp, int mntflags,
		    struct thread *td);
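/*
 * A union mount is requested through mount(2) with a struct union_args
 * (see union.h) describing the layering.  A minimal userland sketch,
 * for illustration only -- the paths and mount point below are made up:
 *
 *	struct union_args ua;
 *
 *	ua.target = "/upper";
 *	ua.mntflags = UNMNT_ABOVE;	(or UNMNT_BELOW, UNMNT_REPLACE)
 *	if (mount("unionfs", "/mnt", 0, (caddr_t)&ua) < 0)
 *		err(1, "mount unionfs");
 *
 * union_mount() below copies these arguments in, resolves the named
 * target with namei(), and builds the union_mount structure.
 */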
/*
 * Mount union filesystem.
 */
static int
union_mount(mp, path, data, ndp, td)
        struct mount *mp;
        char *path;
        caddr_t data;
        struct nameidata *ndp;
        struct thread *td;
{
        int error = 0;
        struct union_args args;
        struct vnode *lowerrootvp = NULLVP;
        struct vnode *upperrootvp = NULLVP;
        struct union_mount *um = 0;
        struct ucred *cred = 0;
        char *cp = 0;
        int len;
        u_int size;

        UDEBUG(("union_mount(mp = %p)\n", (void *)mp));

        /*
         * Disable clustered writes, otherwise the system becomes unstable.
         */
        mp->mnt_flag |= MNT_NOCLUSTERW;

        /*
         * Update mounts are not supported yet.
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                /*
                 * Need to provide:
                 * 1. A way to convert between rdonly and rdwr mounts.
                 * 2. Support for NFS exports.
                 */
                error = EOPNOTSUPP;
                goto bad;
        }

        /*
         * Copy in the arguments.
         */
        error = copyin(data, (caddr_t)&args, sizeof(struct union_args));
        if (error)
                goto bad;

        /*
         * Obtain the lower vnode.  It is stored in mp->mnt_vnodecovered.
         * We need to reference it but not lock it.
         */
        lowerrootvp = mp->mnt_vnodecovered;
        VREF(lowerrootvp);

#if 0
        /*
         * Unlock the lower node to avoid a deadlock.
         */
        if (lowerrootvp->v_op == union_vnodeop_p)
                VOP_UNLOCK(lowerrootvp, 0, td);
#endif

        /*
         * Obtain the upper vnode by calling namei() on the path.  The
         * upperrootvp will be returned referenced but not locked.
         */
        NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT,
            UIO_USERSPACE, args.target, td);

        error = namei(ndp);

#if 0
        if (lowerrootvp->v_op == union_vnodeop_p)
                vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY, td);
#endif
        if (error)
                goto bad;

        NDFREE(ndp, NDF_ONLY_PNBUF);
        upperrootvp = ndp->ni_vp;
        vrele(ndp->ni_dvp);
        ndp->ni_dvp = NULL;

        UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp,
            VOP_ISLOCKED(upperrootvp, NULL)));

        /*
         * Check for a multi union mount to avoid a `lock myself again'
         * panic.  Also require that the upper vnode be a directory.
         */
        if (upperrootvp == VTOUNION(lowerrootvp)->un_uppervp) {
#ifdef DIAGNOSTIC
                printf("union_mount: multi union mount?\n");
#endif
                error = EDEADLK;
                goto bad;
        }

        if (upperrootvp->v_type != VDIR) {
                error = EINVAL;
                goto bad;
        }

        /*
         * Allocate our union_mount structure and populate the fields.
         * The vnode references are stored in the union_mount as held,
         * unlocked references.  Depending on the _BELOW flag, the
         * filesystems are viewed in a different order.  In effect this
         * is the same as providing a mount-under option to the mount
         * syscall.
         */
        um = (struct union_mount *) malloc(sizeof(struct union_mount),
            M_UNIONFSMNT, M_WAITOK | M_ZERO);

        um->um_op = args.mntflags & UNMNT_OPMASK;

        switch (um->um_op) {
        case UNMNT_ABOVE:
                um->um_lowervp = lowerrootvp;
                um->um_uppervp = upperrootvp;
                upperrootvp = NULL;
                lowerrootvp = NULL;
                break;

        case UNMNT_BELOW:
                um->um_lowervp = upperrootvp;
                um->um_uppervp = lowerrootvp;
                upperrootvp = NULL;
                lowerrootvp = NULL;
                break;

        case UNMNT_REPLACE:
                vrele(lowerrootvp);
                lowerrootvp = NULL;
                um->um_uppervp = upperrootvp;
                um->um_lowervp = lowerrootvp;
                upperrootvp = NULL;
                break;

        default:
                error = EINVAL;
                goto bad;
        }
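        /*
         * To summarize the three cases above: the named filesystem is
         * the one resolved from args.target (upperrootvp), and the
         * covered vnode is the directory being mounted on (lowerrootvp):
         *
         *	UNMNT_ABOVE:	upper = named fs,	lower = covered vnode
         *	UNMNT_BELOW:	upper = covered vnode,	lower = named fs
         *	UNMNT_REPLACE:	upper = named fs,	no lower layer
         *
         * Only the upper layer is ever modified, which is why the
         * whiteout check below applies to um_uppervp.
         */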
        /*
         * Unless the mount is read-only, ensure that the top layer
         * supports whiteout operations.
         */
        if ((mp->mnt_flag & MNT_RDONLY) == 0) {
                error = VOP_WHITEOUT(um->um_uppervp, NULL, LOOKUP);
                if (error)
                        goto bad;
        }

        um->um_cred = crhold(td->td_ucred);
        FILEDESC_LOCK(td->td_proc->p_fd);
        um->um_cmode = UN_DIRMODE &~ td->td_proc->p_fd->fd_cmask;
        FILEDESC_UNLOCK(td->td_proc->p_fd);

        /*
         * Depending on what you think the MNT_LOCAL flag might mean,
         * you may want the && to be || on the conditional below.
         * At the moment it has been defined that the filesystem is
         * only local if it is all local, i.e. the MNT_LOCAL flag implies
         * that the entire namespace is local.  If you think the MNT_LOCAL
         * flag implies that some of the files might be stored locally
         * then you will want to change the conditional.
         */
        if (um->um_op == UNMNT_ABOVE) {
                if (((um->um_lowervp == NULLVP) ||
                    (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
                    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
                        mp->mnt_flag |= MNT_LOCAL;
        }

        /*
         * Copy in the upper layer's RDONLY flag.  This is for the benefit
         * of lookup() which explicitly checks the flag, rather than asking
         * the filesystem for its own opinion.  This means that an update
         * mount of the underlying filesystem to go from rdonly to rdwr
         * will leave the unioned view as read-only.
         */
        mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

        mp->mnt_data = (qaddr_t) um;
        vfs_getnewfsid(mp);

        switch (um->um_op) {
        case UNMNT_ABOVE:
                cp = "<above>:";
                break;
        case UNMNT_BELOW:
                cp = "<below>:";
                break;
        case UNMNT_REPLACE:
                cp = "";
                break;
        }
        len = strlen(cp);
        bcopy(cp, mp->mnt_stat.f_mntfromname, len);

        cp = mp->mnt_stat.f_mntfromname + len;
        len = MNAMELEN - len;

        (void) copyinstr(args.target, cp, len - 1, &size);
        bzero(cp + size, len - size);

        (void)union_statfs(mp, &mp->mnt_stat, td);

        UDEBUG(("union_mount: from %s, on %s\n",
            mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname));
        return (0);

bad:
        if (um) {
                if (um->um_uppervp)
                        vrele(um->um_uppervp);
                if (um->um_lowervp)
                        vrele(um->um_lowervp);
                /* XXX other fields */
                free(um, M_UNIONFSMNT);
        }
        if (cred)
                crfree(cred);
        if (upperrootvp)
                vrele(upperrootvp);
        if (lowerrootvp)
                vrele(lowerrootvp);
        return (error);
}
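/*
 * Note that the f_mntfromname recorded above is the op prefix followed
 * by the user-supplied target path.  For example (illustrative path),
 * a UNMNT_ABOVE mount of /usr/obj shows up in df(1) and mount(8) as
 * "<above>:/usr/obj", while a UNMNT_REPLACE mount records just
 * "/usr/obj".
 */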
/*
 * Free references to the union layer.
 */
static int
union_unmount(mp, mntflags, td)
        struct mount *mp;
        int mntflags;
        struct thread *td;
{
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
        int error;
        int freeing;
        int flags = 0;

        UDEBUG(("union_unmount(mp = %p)\n", (void *)mp));

        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;

        /*
         * Keep flushing vnodes from the mount list.
         * This is needed because of the un_pvp held
         * reference to the parent vnode.
         * If more vnodes have been freed on a given pass,
         * then try again.  The loop will iterate at most
         * (d) times, where (d) is the maximum tree depth
         * in the filesystem.
         */
        for (freeing = 0; (error = vflush(mp, 0, flags)) != 0;) {
                struct vnode *vp;
                int n;

                /* count #vnodes held on mount list */
                mtx_lock(&mntvnode_mtx);
                n = 0;
                TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
                        n++;
                mtx_unlock(&mntvnode_mtx);

                /* if this is unchanged then stop */
                if (n == freeing)
                        break;

                /* otherwise try one more time */
                freeing = n;
        }

        /* If the most recent vflush failed, the filesystem is still busy. */
        if (error)
                return (error);

        /*
         * Discard references to the upper and lower target vnodes.
         */
        if (um->um_lowervp)
                vrele(um->um_lowervp);
        vrele(um->um_uppervp);
        crfree(um->um_cred);
        /*
         * Finally, throw away the union_mount structure.
         */
        free(mp->mnt_data, M_UNIONFSMNT);	/* XXX */
        mp->mnt_data = 0;
        return (0);
}

static int
union_root(mp, vpp)
        struct mount *mp;
        struct vnode **vpp;
{
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
        int error;

        /*
         * Supply an unlocked reference to um_uppervp and to um_lowervp.  It
         * is possible for um_uppervp to be locked without the associated
         * root union_node being locked.  We let union_allocvp() deal with
         * it.
         */
        UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp,
            VOP_ISLOCKED(um->um_uppervp, NULL)));

        VREF(um->um_uppervp);
        if (um->um_lowervp)
                VREF(um->um_lowervp);

        error = union_allocvp(vpp, mp, NULLVP, NULLVP, NULL,
            um->um_uppervp, um->um_lowervp, 1);
        UDEBUG(("error %d\n", error));
        UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp,
            VOP_ISLOCKED(um->um_uppervp, NULL)));

        return (error);
}

static int
union_statfs(mp, sbp, td)
        struct mount *mp;
        struct statfs *sbp;
        struct thread *td;
{
        int error;
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
        struct statfs mstat;
        int lbsize;

        UDEBUG(("union_statfs(mp = %p, lvp = %p, uvp = %p)\n",
            (void *)mp, (void *)um->um_lowervp, (void *)um->um_uppervp));

        bzero(&mstat, sizeof(mstat));

        if (um->um_lowervp) {
                error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, td);
                if (error)
                        return (error);
        }

        /* now copy across the "interesting" information and fake the rest */
#if 0
        sbp->f_type = mstat.f_type;
        sbp->f_flags = mstat.f_flags;
        sbp->f_bsize = mstat.f_bsize;
        sbp->f_iosize = mstat.f_iosize;
#endif
        lbsize = mstat.f_bsize;
        sbp->f_blocks = mstat.f_blocks;
        sbp->f_bfree = mstat.f_bfree;
        sbp->f_bavail = mstat.f_bavail;
        sbp->f_files = mstat.f_files;
        sbp->f_ffree = mstat.f_ffree;

        error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, td);
        if (error)
                return (error);

        sbp->f_flags = mstat.f_flags;
        sbp->f_bsize = mstat.f_bsize;
        sbp->f_iosize = mstat.f_iosize;

        /*
         * If the lower and upper blocksizes differ, then frig the
         * block counts so that the sizes reported by df make some
         * kind of sense.  None of this makes sense though.
         */
        if (mstat.f_bsize != lbsize)
                sbp->f_blocks = ((off_t) sbp->f_blocks * lbsize) / mstat.f_bsize;
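        /*
         * For example (illustrative numbers): if the lower layer reports
         * 1000 blocks of 512 bytes while the upper layer uses 1024-byte
         * blocks, the lower count is rescaled to 1000 * 512 / 1024 = 500,
         * so the combined totals below are expressed in upper-layer
         * blocks.
         */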
        /*
         * The "total" fields count total resources in all layers,
         * the "free" fields count only those resources which are
         * free in the upper layer (since only the upper layer
         * is writeable).
         */
        sbp->f_blocks += mstat.f_blocks;
        sbp->f_bfree = mstat.f_bfree;
        sbp->f_bavail = mstat.f_bavail;
        sbp->f_files += mstat.f_files;
        sbp->f_ffree = mstat.f_ffree;

        if (sbp != &mp->mnt_stat) {
                sbp->f_type = mp->mnt_vfc->vfc_typenum;
                bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
                bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
                bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
        }
        return (0);
}

static struct vfsops union_vfsops = {
        union_mount,
        vfs_stdstart,	/* underlying start already done */
        union_unmount,
        union_root,
        vfs_stdquotactl,
        union_statfs,
        vfs_stdsync,	/* XXX assumes no cached data on union level */
        vfs_stdvget,
        vfs_stdfhtovp,
        vfs_stdcheckexp,
        vfs_stdvptofh,
        union_init,
        vfs_stduninit,
        vfs_stdextattrctl,
};

VFS_SET(union_vfsops, unionfs, VFCF_LOOPBACK);