/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
 * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vfsops.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/unionfs/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNIONFS mount", "UNIONFS mount structure");

static vfs_fhtovp_t	unionfs_fhtovp;
static vfs_checkexp_t	unionfs_checkexp;
static vfs_mount_t	unionfs_domount;
static vfs_quotactl_t	unionfs_quotactl;
static vfs_root_t	unionfs_root;
static vfs_sync_t	unionfs_sync;
static vfs_statfs_t	unionfs_statfs;
static vfs_unmount_t	unionfs_unmount;
static vfs_vget_t	unionfs_vget;
static vfs_extattrctl_t	unionfs_extattrctl;

static struct vfsops unionfs_vfsops;

/*
 * Mount unionfs layer.
 */
static int
unionfs_domount(struct mount *mp)
{
	struct mount *lowermp, *uppermp;
	struct vnode *lowerrootvp;
	struct vnode *upperrootvp;
	struct unionfs_mount *ump;
	char *target;
	char *tmp;
	char *ep;
	struct nameidata nd, *ndp;
	struct vattr va;
	unionfs_copymode copymode;
	unionfs_whitemode whitemode;
	int below;
	int error;
	int len;
	uid_t uid;
	gid_t gid;
	u_short udir;
	u_short ufile;

	UNIONFSDEBUG("unionfs_mount(mp = %p)\n", mp);

	error = 0;
	below = 0;
	uid = 0;
	gid = 0;
	udir = 0;
	ufile = 0;
	copymode = UNIONFS_TRANSPARENT;	/* default */
	whitemode = UNIONFS_WHITE_ALWAYS;
	ndp = &nd;

	if (mp->mnt_flag & MNT_ROOTFS) {
		vfs_mount_error(mp, "Cannot union mount root filesystem");
		return (EOPNOTSUPP);
	}

	/*
	 * Update is a no operation.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		vfs_mount_error(mp, "unionfs does not support mount update");
		return (EOPNOTSUPP);
	}

	/*
	 * Get argument
	 */
	error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
	if (error)
		error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target,
		    &len);
	if (error || target[len - 1] != '\0') {
		vfs_mount_error(mp, "Invalid target");
		return (EINVAL);
	}
	if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0)
		below = 1;
	if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			udir = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid udir");
			return (EINVAL);
		}
		udir &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			ufile = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid ufile");
			return (EINVAL);
		}
		ufile &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	/* check umask, uid and gid */
	if (udir == 0 && ufile != 0)
		udir = ufile;
	if (ufile == 0 && udir != 0)
		ufile = udir;

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	if (!error) {
		if (udir == 0)
			udir = va.va_mode;
		if (ufile == 0)
			ufile = va.va_mode;
		uid = va.va_uid;
		gid = va.va_gid;
	}
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid == 0) {	/* root only */
		if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				uid = (uid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid uid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				gid = (gid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid gid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "traditional") == 0)
				copymode = UNIONFS_TRADITIONAL;
			else if (strcasecmp(tmp, "transparent") == 0)
				copymode = UNIONFS_TRANSPARENT;
			else if (strcasecmp(tmp, "masquerade") == 0)
				copymode = UNIONFS_MASQUERADE;
			else {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "always") == 0)
				whitemode = UNIONFS_WHITE_ALWAYS;
			else if (strcasecmp(tmp, "whenneeded") == 0)
				whitemode = UNIONFS_WHITE_WHENNEEDED;
			else {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			}
		}
	}
	/*
	 * If copymode is UNIONFS_TRADITIONAL, use the uid/gid of the
	 * mounting user.
	 */
	if (copymode == UNIONFS_TRADITIONAL) {
		uid = mp->mnt_cred->cr_ruid;
		gid = mp->mnt_cred->cr_rgid;
	}

	UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid);
	UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile);
	UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode);

	/*
	 * Find upper node
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target);
	if ((error = namei(ndp)))
		return (error);

	NDFREE_PNBUF(ndp);

	/* get root vnodes */
	lowerrootvp = mp->mnt_vnodecovered;
	upperrootvp = ndp->ni_vp;
	KASSERT(lowerrootvp != NULL, ("%s: NULL lower root vp", __func__));
	KASSERT(upperrootvp != NULL, ("%s: NULL upper root vp", __func__));

	/* create unionfs_mount */
	ump = malloc(sizeof(struct unionfs_mount), M_UNIONFSMNT,
	    M_WAITOK | M_ZERO);

	/*
	 * Save reference
	 */
	if (below) {
		/*
		 * For 'below' mounts the covered directory serves as the
		 * writable upper layer and the mount target serves as the
		 * lower layer, so swap the roles of the two root vnodes
		 * (and hold the lock on the new upper vnode instead).
		 */
		VOP_UNLOCK(upperrootvp);
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY);
		ump->um_lowervp = upperrootvp;
		ump->um_uppervp = lowerrootvp;
	} else {
		ump->um_lowervp = lowerrootvp;
		ump->um_uppervp = upperrootvp;
	}
	ump->um_rootvp = NULLVP;
	ump->um_uid = uid;
	ump->um_gid = gid;
	ump->um_udir = udir;
	ump->um_ufile = ufile;
	ump->um_copymode = copymode;
	ump->um_whitemode = whitemode;

	mp->mnt_data = ump;

	/*
	 * Copy upper layer's RDONLY flag.
	 */
	mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY;

	/*
	 * Unlock the node
	 */
	VOP_UNLOCK(ump->um_uppervp);

	/*
	 * Get the unionfs root vnode.
	 */
	error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp,
	    NULLVP, &(ump->um_rootvp), NULL);
	if (error != 0) {
		vrele(upperrootvp);
		free(ump, M_UNIONFSMNT);
		mp->mnt_data = NULL;
		return (error);
	}
	KASSERT(ump->um_rootvp != NULL, ("rootvp cannot be NULL"));
	KASSERT((ump->um_rootvp->v_vflag & VV_ROOT) != 0,
	    ("%s: rootvp without VV_ROOT", __func__));

	/*
	 * Do not release the namei() reference on upperrootvp until after
	 * we attempt to register the upper mounts.  A concurrent unmount
	 * of the upper or lower FS may have caused unionfs_nodeget() to
	 * create a unionfs node with a NULL upper or lower vp and with
	 * no reference held on upperrootvp or lowerrootvp.
	 * vfs_register_upper() should subsequently fail, which is what
	 * we want, but we must ensure neither underlying vnode can be
	 * reused until that happens.  We assume the caller holds a
	 * reference to lowerrootvp as it is the mount's covered vnode.
	 */
	lowermp = vfs_register_upper_from_vp(ump->um_lowervp, mp,
	    &ump->um_lower_link);
	uppermp = vfs_register_upper_from_vp(ump->um_uppervp, mp,
	    &ump->um_upper_link);

	vrele(upperrootvp);

	if (lowermp == NULL || uppermp == NULL) {
		if (lowermp != NULL)
			vfs_unregister_upper(lowermp, &ump->um_lower_link);
		if (uppermp != NULL)
			vfs_unregister_upper(uppermp, &ump->um_upper_link);
		vflush(mp, 1, FORCECLOSE, curthread);
		free(ump, M_UNIONFSMNT);
		mp->mnt_data = NULL;
		return (ENOENT);
	}

	/*
	 * Specify that the covered vnode lock should remain held while
	 * lookup() performs the cross-mount walk.  This prevents a lock-order
	 * reversal between the covered vnode lock (which is also locked by
	 * unionfs_lock()) and the mountpoint's busy count.  Without this,
	 * unmount will lock the covered vnode lock (directly through the
	 * covered vnode) and wait for the busy count to drain, while a
	 * concurrent lookup will increment the busy count and then lock
	 * the covered vnode lock (indirectly through unionfs_lock()).
	 *
	 * Note that we can't yet use this facility for the 'below' case
	 * in which the upper vnode is the covered vnode, because that would
	 * introduce a different LOR in which the cross-mount lookup would
	 * effectively hold the upper vnode lock before acquiring the lower
	 * vnode lock, while an unrelated lock operation would still acquire
	 * the lower vnode lock before the upper vnode lock, which is the
	 * order unionfs currently requires.
	 */
	if (!below) {
		vn_lock(mp->mnt_vnodecovered,
		    LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
		mp->mnt_vnodecovered->v_vflag |= VV_CROSSLOCK;
		VOP_UNLOCK(mp->mnt_vnodecovered);
	}

	MNT_ILOCK(mp);
	if ((lowermp->mnt_flag & MNT_LOCAL) != 0 &&
	    (uppermp->mnt_flag & MNT_LOCAL) != 0)
		mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNIONFS;
	MNT_IUNLOCK(mp);

	/*
	 * Get new fsid
	 */
	vfs_getnewfsid(mp);

	snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "<%s>:%s",
	    below ? "below" : "above", target);

	UNIONFSDEBUG("unionfs_mount: from %s, on %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);

	return (0);
}

/*
 * Free reference to unionfs layer
 */
static int
unionfs_unmount(struct mount *mp, int mntflags)
{
	struct unionfs_mount *ump;
	int error;
	int num;
	int freeing;
	int flags;

	UNIONFSDEBUG("unionfs_unmount: mp = %p\n", mp);

	ump = MOUNTTOUNIONFSMOUNT(mp);
	flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/*
	 * vflush (no need to call vrele).  Retry as long as each pass
	 * changes the number of vnodes on the mount; give up once no
	 * further progress is made.
	 */
	for (freeing = 0; (error = vflush(mp, 1, flags, curthread)) != 0;) {
		num = mp->mnt_nvnodelistsize;
		if (num == freeing)
			break;
		freeing = num;
	}

	if (error)
		return (error);

	vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
	mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK;
	VOP_UNLOCK(mp->mnt_vnodecovered);
	vfs_unregister_upper(ump->um_lowervp->v_mount, &ump->um_lower_link);
	vfs_unregister_upper(ump->um_uppervp->v_mount, &ump->um_upper_link);
	free(ump, M_UNIONFSMNT);
	mp->mnt_data = NULL;

	return (0);
}

static int
unionfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct unionfs_mount *ump;
	struct vnode *vp;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	vp = ump->um_rootvp;

	UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n",
	    vp, VOP_ISLOCKED(vp));

	vref(vp);
	if (flags & LK_TYPE_MASK)
		vn_lock(vp, flags);

	*vpp = vp;

	return (0);
}

static int
unionfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg,
    bool *mp_busy)
{
	struct mount *uppermp;
	struct unionfs_mount *ump;
	int error;
	bool unbusy;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	uppermp = atomic_load_ptr(&ump->um_uppervp->v_mount);
	KASSERT(*mp_busy == true, ("upper mount not busy"));
	/*
	 * See comment in sys_quotactl() for an explanation of why the
	 * lower mount needs to be busied by the caller of VFS_QUOTACTL()
	 * but may be unbusied by the implementation.  We must unbusy
	 * the upper mount for the same reason; otherwise a namei lookup
	 * issued by the VFS_QUOTACTL() implementation could traverse the
	 * upper mount and deadlock.
	 */
	vfs_unbusy(mp);
	*mp_busy = false;
	unbusy = true;
	error = vfs_busy(uppermp, 0);
	/*
	 * Writing is always performed to upper vnode.
	 */
	if (error == 0)
		error = VFS_QUOTACTL(uppermp, cmd, uid, arg, &unbusy);
	if (unbusy)
		vfs_unbusy(uppermp);

	return (error);
}

static int
unionfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct unionfs_mount *ump;
	struct statfs *mstat;
	uint64_t lbsize;
	int error;

	ump = MOUNTTOUNIONFSMOUNT(mp);

	UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n",
	    mp, ump->um_lowervp, ump->um_uppervp);

	mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);

	error = VFS_STATFS(ump->um_lowervp->v_mount, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
	sbp->f_blocks = mstat->f_blocks;
	sbp->f_files = mstat->f_files;

	lbsize = mstat->f_bsize;

	error = VFS_STATFS(ump->um_uppervp->v_mount, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/*
	 * The FS type etc. are copied from the upper vfs
	 * (the writable vfs has priority).
	 */
	sbp->f_type = mstat->f_type;
	sbp->f_flags = mstat->f_flags;
	sbp->f_bsize = mstat->f_bsize;
	sbp->f_iosize = mstat->f_iosize;

	/*
	 * Convert the lower layer's block count to the upper layer's
	 * block size before combining the two totals.
	 */
	if (mstat->f_bsize != lbsize)
		sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) /
		    mstat->f_bsize;

	sbp->f_blocks += mstat->f_blocks;
	sbp->f_bfree = mstat->f_bfree;
	sbp->f_bavail = mstat->f_bavail;
	sbp->f_files += mstat->f_files;
	sbp->f_ffree = mstat->f_ffree;

	free(mstat, M_STATFS);
	return (0);
}

static int
unionfs_sync(struct mount *mp, int waitfor)
{
	/* nothing to do */
	return (0);
}

static int
unionfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static int
unionfs_fhtovp(struct mount *mp, struct fid *fidp, int flags,
    struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static int
unionfs_checkexp(struct mount *mp, struct sockaddr *nam, uint64_t *extflagsp,
    struct ucred **credanonp, int *numsecflavors, int *secflavors)
{
	return (EOPNOTSUPP);
}

static int
unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int namespace, const char *attrname)
{
	struct unionfs_mount *ump;
	struct unionfs_node *unp;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	unp = VTOUNIONFS(filename_vp);

	/* Prefer the upper layer vnode when the node has one. */
	if (unp->un_uppervp != NULLVP) {
		return (VFS_EXTATTRCTL(ump->um_uppervp->v_mount, cmd,
		    unp->un_uppervp, namespace, attrname));
	} else {
		return (VFS_EXTATTRCTL(ump->um_lowervp->v_mount, cmd,
		    unp->un_lowervp, namespace, attrname));
	}
}

static struct vfsops unionfs_vfsops = {
	.vfs_checkexp =		unionfs_checkexp,
	.vfs_extattrctl =	unionfs_extattrctl,
	.vfs_fhtovp =		unionfs_fhtovp,
	.vfs_init =		unionfs_init,
	.vfs_mount =		unionfs_domount,
	.vfs_quotactl =		unionfs_quotactl,
	.vfs_root =		unionfs_root,
	.vfs_statfs =		unionfs_statfs,
	.vfs_sync =		unionfs_sync,
	.vfs_uninit =		unionfs_uninit,
	.vfs_unmount =		unionfs_unmount,
	.vfs_vget =		unionfs_vget,
};

VFS_SET(unionfs_vfsops, unionfs, VFCF_LOOPBACK);