1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1994, 1995 The Regents of the University of California. 5 * Copyright (c) 1994, 1995 Jan-Simon Pendry. 6 * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc. 7 * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org> 8 * All rights reserved. 9 * 10 * This code is derived from software donated to Berkeley by 11 * Jan-Simon Pendry. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * @(#)union_vfsops.c 8.20 (Berkeley) 5/20/95 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kdb.h> 43 #include <sys/fcntl.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/mount.h> 48 #include <sys/namei.h> 49 #include <sys/proc.h> 50 #include <sys/vnode.h> 51 #include <sys/stat.h> 52 53 #include <fs/unionfs/union.h> 54 55 static MALLOC_DEFINE(M_UNIONFSMNT, "UNIONFS mount", "UNIONFS mount structure"); 56 57 static vfs_fhtovp_t unionfs_fhtovp; 58 static vfs_checkexp_t unionfs_checkexp; 59 static vfs_mount_t unionfs_domount; 60 static vfs_quotactl_t unionfs_quotactl; 61 static vfs_root_t unionfs_root; 62 static vfs_sync_t unionfs_sync; 63 static vfs_statfs_t unionfs_statfs; 64 static vfs_unmount_t unionfs_unmount; 65 static vfs_vget_t unionfs_vget; 66 static vfs_extattrctl_t unionfs_extattrctl; 67 68 static struct vfsops unionfs_vfsops; 69 70 /* 71 * Mount unionfs layer. 72 */ 73 static int 74 unionfs_domount(struct mount *mp) 75 { 76 struct mount *lowermp, *uppermp; 77 struct vnode *lowerrootvp; 78 struct vnode *upperrootvp; 79 struct unionfs_mount *ump; 80 char *target; 81 char *tmp; 82 char *ep; 83 struct nameidata nd, *ndp; 84 struct vattr va; 85 unionfs_copymode copymode; 86 unionfs_whitemode whitemode; 87 int below; 88 int error; 89 int len; 90 uid_t uid; 91 gid_t gid; 92 u_short udir; 93 u_short ufile; 94 95 UNIONFSDEBUG("unionfs_mount(mp = %p)\n", mp); 96 97 error = 0; 98 below = 0; 99 uid = 0; 100 gid = 0; 101 udir = 0; 102 ufile = 0; 103 copymode = UNIONFS_TRANSPARENT; /* default */ 104 whitemode = UNIONFS_WHITE_ALWAYS; 105 ndp = &nd; 106 107 if (mp->mnt_flag & MNT_ROOTFS) { 108 vfs_mount_error(mp, "Cannot union mount root filesystem"); 109 return (EOPNOTSUPP); 110 } 111 112 /* 113 * Update is a no operation. 114 */ 115 if (mp->mnt_flag & MNT_UPDATE) { 116 vfs_mount_error(mp, "unionfs does not support mount update"); 117 return (EOPNOTSUPP); 118 } 119 120 /* 121 * Get argument 122 */ 123 error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len); 124 if (error) 125 error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target, 126 &len); 127 if (error || target[len - 1] != '\0') { 128 vfs_mount_error(mp, "Invalid target"); 129 return (EINVAL); 130 } 131 if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0) 132 below = 1; 133 if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) { 134 if (tmp != NULL) 135 udir = (mode_t)strtol(tmp, &ep, 8); 136 if (tmp == NULL || *ep) { 137 vfs_mount_error(mp, "Invalid udir"); 138 return (EINVAL); 139 } 140 udir &= S_IRWXU | S_IRWXG | S_IRWXO; 141 } 142 if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) { 143 if (tmp != NULL) 144 ufile = (mode_t)strtol(tmp, &ep, 8); 145 if (tmp == NULL || *ep) { 146 vfs_mount_error(mp, "Invalid ufile"); 147 return (EINVAL); 148 } 149 ufile &= S_IRWXU | S_IRWXG | S_IRWXO; 150 } 151 /* check umask, uid and gid */ 152 if (udir == 0 && ufile != 0) 153 udir = ufile; 154 if (ufile == 0 && udir != 0) 155 ufile = udir; 156 157 vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY); 158 error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred); 159 if (!error) { 160 if (udir == 0) 161 udir = va.va_mode; 162 if (ufile == 0) 163 ufile = va.va_mode; 164 uid = va.va_uid; 165 gid = va.va_gid; 166 } 167 VOP_UNLOCK(mp->mnt_vnodecovered); 168 if (error) 169 return (error); 170 171 if (mp->mnt_cred->cr_ruid == 0) { /* root only */ 172 if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp, 173 NULL) == 0) { 174 if (tmp != NULL) 175 uid = (uid_t)strtol(tmp, &ep, 10); 176 if (tmp == NULL || *ep) { 177 vfs_mount_error(mp, "Invalid uid"); 178 return (EINVAL); 179 } 180 } 181 if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp, 182 NULL) == 0) { 183 if (tmp != NULL) 184 gid = (gid_t)strtol(tmp, &ep, 10); 185 if (tmp == NULL || *ep) { 186 vfs_mount_error(mp, "Invalid gid"); 187 return (EINVAL); 188 } 189 } 190 if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp, 191 NULL) == 0) { 192 if (tmp == NULL) { 193 vfs_mount_error(mp, "Invalid copymode"); 194 return (EINVAL); 195 } else if (strcasecmp(tmp, "traditional") == 0) 196 copymode = UNIONFS_TRADITIONAL; 197 else if (strcasecmp(tmp, "transparent") == 0) 198 copymode = UNIONFS_TRANSPARENT; 199 else if (strcasecmp(tmp, "masquerade") == 0) 200 copymode = UNIONFS_MASQUERADE; 201 else { 202 vfs_mount_error(mp, "Invalid copymode"); 203 return (EINVAL); 204 } 205 } 206 if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp, 207 NULL) == 0) { 208 if (tmp == NULL) { 209 vfs_mount_error(mp, "Invalid whiteout mode"); 210 return (EINVAL); 211 } else if (strcasecmp(tmp, "always") == 0) 212 whitemode = UNIONFS_WHITE_ALWAYS; 213 else if (strcasecmp(tmp, "whenneeded") == 0) 214 whitemode = UNIONFS_WHITE_WHENNEEDED; 215 else { 216 vfs_mount_error(mp, "Invalid whiteout mode"); 217 return (EINVAL); 218 } 219 } 220 } 221 /* If copymode is UNIONFS_TRADITIONAL, uid/gid is mounted user. */ 222 if (copymode == UNIONFS_TRADITIONAL) { 223 uid = mp->mnt_cred->cr_ruid; 224 gid = mp->mnt_cred->cr_rgid; 225 } 226 227 UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid); 228 UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile); 229 UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode); 230 231 /* 232 * Find upper node 233 */ 234 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target); 235 if ((error = namei(ndp))) 236 return (error); 237 238 NDFREE_PNBUF(ndp); 239 240 /* get root vnodes */ 241 lowerrootvp = mp->mnt_vnodecovered; 242 upperrootvp = ndp->ni_vp; 243 KASSERT(lowerrootvp != NULL, ("%s: NULL lower root vp", __func__)); 244 KASSERT(upperrootvp != NULL, ("%s: NULL upper root vp", __func__)); 245 246 /* create unionfs_mount */ 247 ump = malloc(sizeof(struct unionfs_mount), M_UNIONFSMNT, 248 M_WAITOK | M_ZERO); 249 250 /* 251 * Save reference 252 */ 253 if (below) { 254 VOP_UNLOCK(upperrootvp); 255 vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY); 256 ump->um_lowervp = upperrootvp; 257 ump->um_uppervp = lowerrootvp; 258 } else { 259 ump->um_lowervp = lowerrootvp; 260 ump->um_uppervp = upperrootvp; 261 } 262 ump->um_rootvp = NULLVP; 263 ump->um_uid = uid; 264 ump->um_gid = gid; 265 ump->um_udir = udir; 266 ump->um_ufile = ufile; 267 ump->um_copymode = copymode; 268 ump->um_whitemode = whitemode; 269 270 mp->mnt_data = ump; 271 272 /* 273 * Copy upper layer's RDONLY flag. 274 */ 275 mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY; 276 277 /* 278 * Unlock the node 279 */ 280 VOP_UNLOCK(ump->um_uppervp); 281 282 /* 283 * Get the unionfs root vnode. 284 */ 285 error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp, 286 NULLVP, &(ump->um_rootvp), NULL); 287 if (error != 0) { 288 vrele(upperrootvp); 289 free(ump, M_UNIONFSMNT); 290 mp->mnt_data = NULL; 291 return (error); 292 } 293 KASSERT(ump->um_rootvp != NULL, ("rootvp cannot be NULL")); 294 KASSERT((ump->um_rootvp->v_vflag & VV_ROOT) != 0, 295 ("%s: rootvp without VV_ROOT", __func__)); 296 297 /* 298 * Do not release the namei() reference on upperrootvp until after 299 * we attempt to register the upper mounts. A concurrent unmount 300 * of the upper or lower FS may have caused unionfs_nodeget() to 301 * create a unionfs node with a NULL upper or lower vp and with 302 * no reference held on upperrootvp or lowerrootvp. 303 * vfs_register_upper() should subsequently fail, which is what 304 * we want, but we must ensure neither underlying vnode can be 305 * reused until that happens. We assume the caller holds a reference 306 * to lowerrootvp as it is the mount's covered vnode. 307 */ 308 lowermp = vfs_register_upper_from_vp(ump->um_lowervp, mp, 309 &ump->um_lower_link); 310 uppermp = vfs_register_upper_from_vp(ump->um_uppervp, mp, 311 &ump->um_upper_link); 312 313 vrele(upperrootvp); 314 315 if (lowermp == NULL || uppermp == NULL) { 316 if (lowermp != NULL) 317 vfs_unregister_upper(lowermp, &ump->um_lower_link); 318 if (uppermp != NULL) 319 vfs_unregister_upper(uppermp, &ump->um_upper_link); 320 vflush(mp, 1, FORCECLOSE, curthread); 321 free(ump, M_UNIONFSMNT); 322 mp->mnt_data = NULL; 323 return (ENOENT); 324 } 325 326 /* 327 * Specify that the covered vnode lock should remain held while 328 * lookup() performs the cross-mount walk. This prevents a lock-order 329 * reversal between the covered vnode lock (which is also locked by 330 * unionfs_lock()) and the mountpoint's busy count. Without this, 331 * unmount will lock the covered vnode lock (directly through the 332 * covered vnode) and wait for the busy count to drain, while a 333 * concurrent lookup will increment the busy count and then lock 334 * the covered vnode lock (indirectly through unionfs_lock()). 335 * 336 * Note that we can't yet use this facility for the 'below' case 337 * in which the upper vnode is the covered vnode, because that would 338 * introduce a different LOR in which the cross-mount lookup would 339 * effectively hold the upper vnode lock before acquiring the lower 340 * vnode lock, while an unrelated lock operation would still acquire 341 * the lower vnode lock before the upper vnode lock, which is the 342 * order unionfs currently requires. 343 */ 344 if (!below) { 345 vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE); 346 mp->mnt_vnodecovered->v_vflag |= VV_CROSSLOCK; 347 VOP_UNLOCK(mp->mnt_vnodecovered); 348 } 349 350 MNT_ILOCK(mp); 351 if ((lowermp->mnt_flag & MNT_LOCAL) != 0 && 352 (uppermp->mnt_flag & MNT_LOCAL) != 0) 353 mp->mnt_flag |= MNT_LOCAL; 354 mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNIONFS; 355 MNT_IUNLOCK(mp); 356 357 /* 358 * Get new fsid 359 */ 360 vfs_getnewfsid(mp); 361 362 snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "<%s>:%s", 363 below ? "below" : "above", target); 364 365 UNIONFSDEBUG("unionfs_mount: from %s, on %s\n", 366 mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname); 367 368 return (0); 369 } 370 371 /* 372 * Free reference to unionfs layer 373 */ 374 static int 375 unionfs_unmount(struct mount *mp, int mntflags) 376 { 377 struct unionfs_mount *ump; 378 int error; 379 int num; 380 int freeing; 381 int flags; 382 383 UNIONFSDEBUG("unionfs_unmount: mp = %p\n", mp); 384 385 ump = MOUNTTOUNIONFSMOUNT(mp); 386 flags = 0; 387 388 if (mntflags & MNT_FORCE) 389 flags |= FORCECLOSE; 390 391 /* vflush (no need to call vrele) */ 392 for (freeing = 0; (error = vflush(mp, 1, flags, curthread)) != 0;) { 393 num = mp->mnt_nvnodelistsize; 394 if (num == freeing) 395 break; 396 freeing = num; 397 } 398 399 if (error) 400 return (error); 401 402 vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE); 403 mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK; 404 VOP_UNLOCK(mp->mnt_vnodecovered); 405 vfs_unregister_upper(ump->um_lowervp->v_mount, &ump->um_lower_link); 406 vfs_unregister_upper(ump->um_uppervp->v_mount, &ump->um_upper_link); 407 free(ump, M_UNIONFSMNT); 408 mp->mnt_data = NULL; 409 410 return (0); 411 } 412 413 static int 414 unionfs_root(struct mount *mp, int flags, struct vnode **vpp) 415 { 416 struct unionfs_mount *ump; 417 struct vnode *vp; 418 419 ump = MOUNTTOUNIONFSMOUNT(mp); 420 vp = ump->um_rootvp; 421 422 UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n", 423 vp, VOP_ISLOCKED(vp)); 424 425 vref(vp); 426 if (flags & LK_TYPE_MASK) 427 vn_lock(vp, flags); 428 429 *vpp = vp; 430 431 return (0); 432 } 433 434 static int 435 unionfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg, 436 bool *mp_busy) 437 { 438 struct mount *uppermp; 439 struct unionfs_mount *ump; 440 int error; 441 bool unbusy; 442 443 ump = MOUNTTOUNIONFSMOUNT(mp); 444 uppermp = atomic_load_ptr(&ump->um_uppervp->v_mount); 445 KASSERT(*mp_busy == true, ("upper mount not busy")); 446 /* 447 * See comment in sys_quotactl() for an explanation of why the 448 * lower mount needs to be busied by the caller of VFS_QUOTACTL() 449 * but may be unbusied by the implementation. We must unbusy 450 * the upper mount for the same reason; otherwise a namei lookup 451 * issued by the VFS_QUOTACTL() implementation could traverse the 452 * upper mount and deadlock. 453 */ 454 vfs_unbusy(mp); 455 *mp_busy = false; 456 unbusy = true; 457 error = vfs_busy(uppermp, 0); 458 /* 459 * Writing is always performed to upper vnode. 460 */ 461 if (error == 0) 462 error = VFS_QUOTACTL(uppermp, cmd, uid, arg, &unbusy); 463 if (unbusy) 464 vfs_unbusy(uppermp); 465 466 return (error); 467 } 468 469 static int 470 unionfs_statfs(struct mount *mp, struct statfs *sbp) 471 { 472 struct unionfs_mount *ump; 473 struct statfs *mstat; 474 uint64_t lbsize; 475 int error; 476 477 ump = MOUNTTOUNIONFSMOUNT(mp); 478 479 UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n", 480 mp, ump->um_lowervp, ump->um_uppervp); 481 482 mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO); 483 484 error = VFS_STATFS(ump->um_lowervp->v_mount, mstat); 485 if (error) { 486 free(mstat, M_STATFS); 487 return (error); 488 } 489 490 /* now copy across the "interesting" information and fake the rest */ 491 sbp->f_blocks = mstat->f_blocks; 492 sbp->f_files = mstat->f_files; 493 494 lbsize = mstat->f_bsize; 495 496 error = VFS_STATFS(ump->um_uppervp->v_mount, mstat); 497 if (error) { 498 free(mstat, M_STATFS); 499 return (error); 500 } 501 502 /* 503 * The FS type etc is copy from upper vfs. 504 * (write able vfs have priority) 505 */ 506 sbp->f_type = mstat->f_type; 507 sbp->f_flags = mstat->f_flags; 508 sbp->f_bsize = mstat->f_bsize; 509 sbp->f_iosize = mstat->f_iosize; 510 511 if (mstat->f_bsize != lbsize) 512 sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) / 513 mstat->f_bsize; 514 515 sbp->f_blocks += mstat->f_blocks; 516 sbp->f_bfree = mstat->f_bfree; 517 sbp->f_bavail = mstat->f_bavail; 518 sbp->f_files += mstat->f_files; 519 sbp->f_ffree = mstat->f_ffree; 520 521 free(mstat, M_STATFS); 522 return (0); 523 } 524 525 static int 526 unionfs_sync(struct mount *mp, int waitfor) 527 { 528 /* nothing to do */ 529 return (0); 530 } 531 532 static int 533 unionfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) 534 { 535 return (EOPNOTSUPP); 536 } 537 538 static int 539 unionfs_fhtovp(struct mount *mp, struct fid *fidp, int flags, 540 struct vnode **vpp) 541 { 542 return (EOPNOTSUPP); 543 } 544 545 static int 546 unionfs_checkexp(struct mount *mp, struct sockaddr *nam, uint64_t *extflagsp, 547 struct ucred **credanonp, int *numsecflavors, int *secflavors) 548 { 549 return (EOPNOTSUPP); 550 } 551 552 static int 553 unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, 554 int namespace, const char *attrname) 555 { 556 struct unionfs_mount *ump; 557 struct unionfs_node *unp; 558 559 ump = MOUNTTOUNIONFSMOUNT(mp); 560 unp = VTOUNIONFS(filename_vp); 561 562 if (unp->un_uppervp != NULLVP) { 563 return (VFS_EXTATTRCTL(ump->um_uppervp->v_mount, cmd, 564 unp->un_uppervp, namespace, attrname)); 565 } else { 566 return (VFS_EXTATTRCTL(ump->um_lowervp->v_mount, cmd, 567 unp->un_lowervp, namespace, attrname)); 568 } 569 } 570 571 static struct vfsops unionfs_vfsops = { 572 .vfs_checkexp = unionfs_checkexp, 573 .vfs_extattrctl = unionfs_extattrctl, 574 .vfs_fhtovp = unionfs_fhtovp, 575 .vfs_init = unionfs_init, 576 .vfs_mount = unionfs_domount, 577 .vfs_quotactl = unionfs_quotactl, 578 .vfs_root = unionfs_root, 579 .vfs_statfs = unionfs_statfs, 580 .vfs_sync = unionfs_sync, 581 .vfs_uninit = unionfs_uninit, 582 .vfs_unmount = unionfs_unmount, 583 .vfs_vget = unionfs_vget, 584 }; 585 586 VFS_SET(unionfs_vfsops, unionfs, VFCF_LOOPBACK); 587