/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
 * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/unionfs/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNIONFS mount", "UNIONFS mount structure");

static vfs_fhtovp_t unionfs_fhtovp;
static vfs_checkexp_t unionfs_checkexp;
static vfs_mount_t unionfs_domount;
static vfs_quotactl_t unionfs_quotactl;
static vfs_root_t unionfs_root;
static vfs_sync_t unionfs_sync;
static vfs_statfs_t unionfs_statfs;
static vfs_unmount_t unionfs_unmount;
static vfs_vget_t unionfs_vget;
static vfs_extattrctl_t unionfs_extattrctl;

static struct vfsops unionfs_vfsops;

/*
 * Mount unionfs layer.
 */
static int
unionfs_domount(struct mount *mp)
{
	struct vnode *lowerrootvp;
	struct vnode *upperrootvp;
	struct unionfs_mount *ump;
	char *target;
	char *tmp;
	char *ep;
	struct nameidata nd, *ndp;
	struct vattr va;
	unionfs_copymode copymode;
	unionfs_whitemode whitemode;
	int below;
	int error;
	int len;
	uid_t uid;
	gid_t gid;
	u_short udir;
	u_short ufile;

	UNIONFSDEBUG("unionfs_mount(mp = %p)\n", mp);

	error = 0;
	below = 0;
	uid = 0;
	gid = 0;
	udir = 0;
	ufile = 0;
	copymode = UNIONFS_TRANSPARENT; /* default */
	whitemode = UNIONFS_WHITE_ALWAYS;
	ndp = &nd;

	if (mp->mnt_flag & MNT_ROOTFS) {
		vfs_mount_error(mp, "Cannot union mount root filesystem");
		return (EOPNOTSUPP);
	}

	/*
	 * Mount updates are not supported.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		vfs_mount_error(mp, "unionfs does not support mount update");
		return (EOPNOTSUPP);
	}

	/*
	 * Get arguments.
	 */
	error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
	if (error)
		error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target,
		    &len);
	if (error || target[len - 1] != '\0') {
		vfs_mount_error(mp, "Invalid target");
		return (EINVAL);
	}
	if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0)
		below = 1;
	if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			udir = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid udir");
			return (EINVAL);
		}
		udir &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			ufile = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid ufile");
			return (EINVAL);
		}
		ufile &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	/* check umask, uid and gid */
	if (udir == 0 && ufile != 0)
		udir = ufile;
	if (ufile == 0 && udir != 0)
		ufile = udir;

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	if (!error) {
		if (udir == 0)
			udir = va.va_mode;
		if (ufile == 0)
			ufile = va.va_mode;
		uid = va.va_uid;
		gid = va.va_gid;
	}
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid == 0) {	/* root only */
		if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				uid = (uid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid uid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				gid = (gid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid gid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "traditional") == 0)
				copymode = UNIONFS_TRADITIONAL;
			else if (strcasecmp(tmp, "transparent") == 0)
				copymode = UNIONFS_TRANSPARENT;
			else if (strcasecmp(tmp, "masquerade") == 0)
				copymode = UNIONFS_MASQUERADE;
			else {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "always") == 0)
				whitemode = UNIONFS_WHITE_ALWAYS;
			else if (strcasecmp(tmp, "whenneeded") == 0)
				whitemode = UNIONFS_WHITE_WHENNEEDED;
			else {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			}
		}
	}
	/*
	 * In UNIONFS_TRADITIONAL mode, the uid and gid are those of the
	 * mounting user.
	 */
	if (copymode == UNIONFS_TRADITIONAL) {
		uid = mp->mnt_cred->cr_ruid;
		gid = mp->mnt_cred->cr_rgid;
	}

	UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid);
	UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile);
	UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode);
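
	/*
	 * Illustrative example (the option names follow the vfs_getopt()
	 * calls above; the paths and mode are placeholders):
	 *
	 *	mount -t unionfs -o copymode=transparent,udir=0755 /upper /mnt
	 *
	 * The "target"/"from" option (/upper here) is resolved by namei()
	 * below and becomes the upper layer, while the covered mount point
	 * (/mnt) supplies the lower layer.  The "below" option swaps the
	 * two roles.
	 */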

	/*
	 * Find upper node
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target);
	if ((error = namei(ndp)))
		return (error);

	NDFREE_PNBUF(ndp);

	/* get root vnodes */
	lowerrootvp = mp->mnt_vnodecovered;
	upperrootvp = ndp->ni_vp;
	KASSERT(lowerrootvp != NULL, ("%s: NULL lower root vp", __func__));
	KASSERT(upperrootvp != NULL, ("%s: NULL upper root vp", __func__));

	/* create unionfs_mount */
	ump = malloc(sizeof(struct unionfs_mount), M_UNIONFSMNT,
	    M_WAITOK | M_ZERO);

	/*
	 * Save reference
	 */
	if (below) {
		VOP_UNLOCK(upperrootvp);
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY);
		ump->um_lowervp = upperrootvp;
		ump->um_uppervp = lowerrootvp;
	} else {
		ump->um_lowervp = lowerrootvp;
		ump->um_uppervp = upperrootvp;
	}
	ump->um_rootvp = NULLVP;
	ump->um_uid = uid;
	ump->um_gid = gid;
	ump->um_udir = udir;
	ump->um_ufile = ufile;
	ump->um_copymode = copymode;
	ump->um_whitemode = whitemode;

	mp->mnt_data = ump;

	/*
	 * Copy upper layer's RDONLY flag.
	 */
	mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY;

	/*
	 * Unlock the node
	 */
	VOP_UNLOCK(ump->um_uppervp);

	/*
	 * Get the unionfs root vnode.
	 */
	error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp,
	    NULLVP, &(ump->um_rootvp), NULL);
	if (error != 0) {
		vrele(upperrootvp);
		free(ump, M_UNIONFSMNT);
		mp->mnt_data = NULL;
		return (error);
	}
	KASSERT(ump->um_rootvp != NULL, ("rootvp cannot be NULL"));
	KASSERT((ump->um_rootvp->v_vflag & VV_ROOT) != 0,
	    ("%s: rootvp without VV_ROOT", __func__));
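
	/*
	 * At this point the vnode stack for the mount root looks roughly
	 * like this (sketch):
	 *
	 *	um_rootvp (unionfs root, VV_ROOT)
	 *	    +-- um_uppervp (upper layer root; this is the covered
	 *	    |   vnode when "below" was given)
	 *	    +-- um_lowervp (lower layer root; the covered vnode
	 *	        otherwise)
	 */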

	/*
	 * Do not release the namei() reference on upperrootvp until after
	 * we attempt to register the upper mounts.  A concurrent unmount
	 * of the upper or lower FS may have caused unionfs_nodeget() to
	 * create a unionfs node with a NULL upper or lower vp and with
	 * no reference held on upperrootvp or lowerrootvp.
	 * vfs_register_upper() should subsequently fail, which is what
	 * we want, but we must ensure neither underlying vnode can be
	 * reused until that happens.  We assume the caller holds a
	 * reference to lowerrootvp as it is the mount's covered vnode.
	 */
	ump->um_lowermp = vfs_register_upper_from_vp(ump->um_lowervp, mp,
	    &ump->um_lower_link);
	ump->um_uppermp = vfs_register_upper_from_vp(ump->um_uppervp, mp,
	    &ump->um_upper_link);

	vrele(upperrootvp);

	if (ump->um_lowermp == NULL || ump->um_uppermp == NULL) {
		if (ump->um_lowermp != NULL)
			vfs_unregister_upper(ump->um_lowermp,
			    &ump->um_lower_link);
		if (ump->um_uppermp != NULL)
			vfs_unregister_upper(ump->um_uppermp,
			    &ump->um_upper_link);
		vflush(mp, 1, FORCECLOSE, curthread);
		free(ump, M_UNIONFSMNT);
		mp->mnt_data = NULL;
		return (ENOENT);
	}

	/*
	 * Specify that the covered vnode lock should remain held while
	 * lookup() performs the cross-mount walk.  This prevents a lock-order
	 * reversal between the covered vnode lock (which is also locked by
	 * unionfs_lock()) and the mountpoint's busy count.  Without this,
	 * unmount will lock the covered vnode lock (directly through the
	 * covered vnode) and wait for the busy count to drain, while a
	 * concurrent lookup will increment the busy count and then may lock
	 * the covered vnode lock (indirectly through unionfs_lock()).
	 *
	 * Note that this is only needed for the 'below' case in which the
	 * upper vnode is also the covered vnode, because unionfs_lock()
	 * only locks the upper vnode as long as both lower and upper vnodes
	 * are present (which they will always be for the unionfs mount root).
	 */
	if (below) {
		vn_lock(mp->mnt_vnodecovered,
		    LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
		mp->mnt_vnodecovered->v_vflag |= VV_CROSSLOCK;
		VOP_UNLOCK(mp->mnt_vnodecovered);
	}

	MNT_ILOCK(mp);
	if ((ump->um_lowermp->mnt_flag & MNT_LOCAL) != 0 &&
	    (ump->um_uppermp->mnt_flag & MNT_LOCAL) != 0)
		mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNIONFS |
	    (ump->um_uppermp->mnt_kern_flag & MNTK_SHARED_WRITES);
	MNT_IUNLOCK(mp);

	/*
	 * Get new fsid
	 */
	vfs_getnewfsid(mp);

	snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "<%s>:%s",
	    below ? "below" : "above", target);

	UNIONFSDEBUG("unionfs_mount: from %s, on %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);

	return (0);
}

/*
 * Free reference to unionfs layer
 */
static int
unionfs_unmount(struct mount *mp, int mntflags)
{
	struct unionfs_mount *ump;
	int error;
	int num;
	int freeing;
	int flags;

	UNIONFSDEBUG("unionfs_unmount: mp = %p\n", mp);

	ump = MOUNTTOUNIONFSMOUNT(mp);
	flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* vflush (no need to call vrele) */
	for (freeing = 0; (error = vflush(mp, 1, flags, curthread)) != 0;) {
		num = mp->mnt_nvnodelistsize;
		if (num == freeing)
			break;
		freeing = num;
	}

	if (error)
		return (error);

	vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
	mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK;
	VOP_UNLOCK(mp->mnt_vnodecovered);
	vfs_unregister_upper(ump->um_lowermp, &ump->um_lower_link);
	vfs_unregister_upper(ump->um_uppermp, &ump->um_upper_link);
	free(ump, M_UNIONFSMNT);
	mp->mnt_data = NULL;

	return (0);
}

static int
unionfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct unionfs_mount *ump;
	struct vnode *vp;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	vp = ump->um_rootvp;

	UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n",
	    vp, VOP_ISLOCKED(vp));

	vref(vp);
	if (flags & LK_TYPE_MASK)
		vn_lock(vp, flags);

	*vpp = vp;

	return (0);
}

static int
unionfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg,
    bool *mp_busy)
{
	struct mount *uppermp;
	struct unionfs_mount *ump;
	int error;
	bool unbusy;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	/*
	 * Issue a volatile load of um_uppermp here, as the mount may be
	 * torn down after we call vfs_unbusy().
	 */
	uppermp = atomic_load_ptr(&ump->um_uppermp);
	KASSERT(*mp_busy == true, ("upper mount not busy"));
	/*
	 * See comment in sys_quotactl() for an explanation of why the
	 * lower mount needs to be busied by the caller of VFS_QUOTACTL()
	 * but may be unbusied by the implementation.  We must unbusy
	 * the upper mount for the same reason; otherwise a namei lookup
	 * issued by the VFS_QUOTACTL() implementation could traverse the
	 * upper mount and deadlock.
	 */
	vfs_unbusy(mp);
	*mp_busy = false;
	unbusy = true;
	error = vfs_busy(uppermp, 0);
	/*
	 * Writing is always performed on the upper file system.
	 */
	if (error == 0)
		error = VFS_QUOTACTL(uppermp, cmd, uid, arg, &unbusy);
	if (unbusy)
		vfs_unbusy(uppermp);

	return (error);
}

static int
unionfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct unionfs_mount *ump;
	struct statfs *mstat;
	uint64_t lbsize;
	int error;

	ump = MOUNTTOUNIONFSMOUNT(mp);

	UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n",
	    mp, ump->um_lowervp, ump->um_uppervp);

	mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);

	error = VFS_STATFS(ump->um_lowermp, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
	sbp->f_blocks = mstat->f_blocks;
	sbp->f_files = mstat->f_files;

	lbsize = mstat->f_bsize;

	error = VFS_STATFS(ump->um_uppermp, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/*
	 * The FS type, flags, and block sizes are copied from the upper
	 * layer (the writable layer takes precedence).
	 */
	sbp->f_type = mstat->f_type;
	sbp->f_flags = mstat->f_flags;
	sbp->f_bsize = mstat->f_bsize;
	sbp->f_iosize = mstat->f_iosize;

	/*
	 * Rescale the lower layer's block count into the upper layer's
	 * block size (e.g. 1000 512-byte blocks become 125 4096-byte
	 * blocks) before adding in the upper layer's count.
	 */
	if (mstat->f_bsize != lbsize)
		sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) /
		    mstat->f_bsize;

	sbp->f_blocks += mstat->f_blocks;
	sbp->f_bfree = mstat->f_bfree;
	sbp->f_bavail = mstat->f_bavail;
	sbp->f_files += mstat->f_files;
	sbp->f_ffree = mstat->f_ffree;

	free(mstat, M_STATFS);
	return (0);
}

static int
unionfs_sync(struct mount *mp, int waitfor)
{
	/* nothing to do */
	return (0);
}

static int
unionfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static int
unionfs_fhtovp(struct mount *mp, struct fid *fidp, int flags,
    struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static int
unionfs_checkexp(struct mount *mp, struct sockaddr *nam, uint64_t *extflagsp,
    struct ucred **credanonp, int *numsecflavors, int *secflavors)
{
	return (EOPNOTSUPP);
}

static int
unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int namespace, const char *attrname)
{
	struct unionfs_mount *ump;
	struct unionfs_node *unp;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	unp = VTOUNIONFS(filename_vp);

	if (unp->un_uppervp != NULLVP) {
		return (VFS_EXTATTRCTL(ump->um_uppermp, cmd,
		    unp->un_uppervp, namespace, attrname));
	} else {
		return (VFS_EXTATTRCTL(ump->um_lowermp, cmd,
		    unp->un_lowervp, namespace, attrname));
	}
}

static struct vfsops unionfs_vfsops = {
	.vfs_checkexp = unionfs_checkexp,
	.vfs_extattrctl = unionfs_extattrctl,
	.vfs_fhtovp = unionfs_fhtovp,
	.vfs_init = unionfs_init,
	.vfs_mount = unionfs_domount,
	.vfs_quotactl = unionfs_quotactl,
	.vfs_root = unionfs_root,
	.vfs_statfs = unionfs_statfs,
	.vfs_sync = unionfs_sync,
	.vfs_uninit = unionfs_uninit,
	.vfs_unmount = unionfs_unmount,
	.vfs_vget = unionfs_vget,
};

VFS_SET(unionfs_vfsops, unionfs, VFCF_LOOPBACK);
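
/*
 * Usage sketch: VFS_SET() above registers the "unionfs" filesystem type,
 * so the code is typically reached by loading the module ("kldload
 * unionfs") or building it into the kernel ("options UNIONFS"), followed
 * by a mount such as "mount -t unionfs /upper /mnt" (placeholder paths;
 * see the example in unionfs_domount() above).
 */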