/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vfsops.c	8.2 (Berkeley) 1/21/94
 *
 *	@(#)lofs_vfsops.c	1.2 (Berkeley) 6/18/92
 * $FreeBSD$
 */

/*
 * Null Layer
 * (See null_vnops.c for a description of what this does.)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>

#include <fs/nullfs/null.h>

static MALLOC_DEFINE(M_NULLFSMNT, "nullfs_mount", "NULLFS mount structure");

static vfs_fhtovp_t	nullfs_fhtovp;
static vfs_mount_t	nullfs_mount;
static vfs_quotactl_t	nullfs_quotactl;
static vfs_root_t	nullfs_root;
static vfs_sync_t	nullfs_sync;
static vfs_statfs_t	nullfs_statfs;
static vfs_unmount_t	nullfs_unmount;
static vfs_vget_t	nullfs_vget;
static vfs_extattrctl_t	nullfs_extattrctl;

/*
 * Mount null layer
 */
static int
nullfs_mount(struct mount *mp)
{
	struct vnode *lowerrootvp;
	struct vnode *nullm_rootvp;
	struct null_mount *xmp;
	struct null_node *nn;
	struct nameidata nd, *ndp;
	char *target;
	int error, len;
	bool isvnunlocked;

	NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);

	if (mp->mnt_flag & MNT_ROOTFS)
		return (EOPNOTSUPP);

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Only support update mounts for NFS export.
		 */
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0))
			return (0);
		else
			return (EOPNOTSUPP);
	}

	/*
	 * Get argument
	 */
	error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target, &len);
	if (error != 0)
		error = vfs_getopt(mp->mnt_optnew, "target",
		    (void **)&target, &len);
	if (error || target[len - 1] != '\0')
		return (EINVAL);

	/*
	 * Unlock lower node to avoid possible deadlock.
	 */
	if (mp->mnt_vnodecovered->v_op == &null_vnodeops &&
	    VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
		VOP_UNLOCK(mp->mnt_vnodecovered);
		isvnunlocked = true;
	} else {
		isvnunlocked = false;
	}

	/*
	 * Find lower node
	 */
	ndp = &nd;
	NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target);
	error = namei(ndp);

	/*
	 * Re-lock vnode.
	 * XXXKIB This is deadlock-prone as well.
	 */
	if (isvnunlocked)
		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);

	if (error)
		return (error);
	NDFREE_PNBUF(ndp);

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = ndp->ni_vp;

	/*
	 * Check multi null mount to avoid `lock against myself' panic.
	 */
	if (mp->mnt_vnodecovered->v_op == &null_vnodeops) {
		nn = VTONULL(mp->mnt_vnodecovered);
		if (nn == NULL || lowerrootvp == nn->null_lowervp) {
			NULLFSDEBUG("nullfs_mount: multi null mount?\n");
			vput(lowerrootvp);
			return (EDEADLK);
		}
	}

	/*
	 * Lower vnode must be the same type as the covered vnode - we
	 * don't allow mounting directories to files or vice versa.
	 */
	if ((lowerrootvp->v_type != VDIR && lowerrootvp->v_type != VREG) ||
	    lowerrootvp->v_type != mp->mnt_vnodecovered->v_type) {
		NULLFSDEBUG("nullfs_mount: target must be same type as fspath");
		vput(lowerrootvp);
		return (EINVAL);
	}

	xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
	    M_NULLFSMNT, M_WAITOK | M_ZERO);

	/*
	 * Save pointer to underlying FS and the reference to the
	 * lower root vnode.
	 */
	xmp->nullm_vfs = vfs_register_upper_from_vp(lowerrootvp, mp,
	    &xmp->upper_node);
	if (xmp->nullm_vfs == NULL) {
		vput(lowerrootvp);
		free(xmp, M_NULLFSMNT);
		return (ENOENT);
	}
	vref(lowerrootvp);
	xmp->nullm_lowerrootvp = lowerrootvp;
	mp->mnt_data = xmp;

	/*
	 * Make sure the node alias worked.
	 */
	error = null_nodeget(mp, lowerrootvp, &nullm_rootvp);
	if (error != 0) {
		vfs_unregister_upper(xmp->nullm_vfs, &xmp->upper_node);
		vrele(lowerrootvp);
		free(xmp, M_NULLFSMNT);
		return (error);
	}

	if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_LOCAL;
		MNT_IUNLOCK(mp);
	}

	xmp->nullm_flags |= NULLM_CACHE;
	if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0 ||
	    (xmp->nullm_vfs->mnt_kern_flag & MNTK_NULL_NOCACHE) != 0)
		xmp->nullm_flags &= ~NULLM_CACHE;

	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		vfs_register_for_notification(xmp->nullm_vfs, mp,
		    &xmp->notify_node);
	}

	if (lowerrootvp == mp->mnt_vnodecovered) {
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
		lowerrootvp->v_vflag |= VV_CROSSLOCK;
		VOP_UNLOCK(lowerrootvp);
	}

	MNT_ILOCK(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
		    (MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
		    MNTK_EXTENDED_SHARED);
	}
	mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNLOCKED_INSMNTQUE;
	mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
	    (MNTK_USES_BCACHE | MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS);
	MNT_IUNLOCK(mp);
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, target);
	vput(nullm_rootvp);

	NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
	return (0);
}

/*
 * Free reference to null layer
 */
static int
nullfs_unmount(struct mount *mp, int mntflags)
{
	struct null_mount *mntdata;
	int error, flags;

	NULLFSDEBUG("nullfs_unmount: mp = %p\n", (void *)mp);

	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;

	for (;;) {
		/* There is 1 extra root vnode reference (nullm_rootvp). */
		error = vflush(mp, 0, flags, curthread);
		if (error)
			return (error);
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0)
			return (EBUSY);
	}

	/*
	 * Finally, throw away the null_mount structure
	 */
	mntdata = mp->mnt_data;
	if ((mntdata->nullm_flags & NULLM_CACHE) != 0) {
		vfs_unregister_for_notification(mntdata->nullm_vfs,
		    &mntdata->notify_node);
	}
	if (mntdata->nullm_lowerrootvp == mp->mnt_vnodecovered) {
		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY |
		    LK_CANRECURSE);
		mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK;
		VOP_UNLOCK(mp->mnt_vnodecovered);
	}
	vfs_unregister_upper(mntdata->nullm_vfs, &mntdata->upper_node);
	vrele(mntdata->nullm_lowerrootvp);
	mp->mnt_data = NULL;
	free(mntdata, M_NULLFSMNT);
	return (0);
}

static int
nullfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	struct null_mount *mntdata;
	int error;

	mntdata = MOUNTTONULLMOUNT(mp);
	NULLFSDEBUG("nullfs_root(mp = %p, vp = %p)\n", mp,
	    mntdata->nullm_lowerrootvp);

	error = vget(mntdata->nullm_lowerrootvp, flags);
	if (error == 0) {
		error = null_nodeget(mp, mntdata->nullm_lowerrootvp, &vp);
		if (error == 0) {
			*vpp = vp;
		}
	}
	return (error);
}

static int
nullfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg, bool *mp_busy)
{
	struct mount *lowermp;
	struct null_mount *mntdata;
	int error;
	bool unbusy;

	mntdata = MOUNTTONULLMOUNT(mp);
	lowermp = atomic_load_ptr(&mntdata->nullm_vfs);
	KASSERT(*mp_busy == true, ("upper mount not busy"));
	/*
	 * See comment in sys_quotactl() for an explanation of why the
	 * lower mount needs to be busied by the caller of VFS_QUOTACTL()
	 * but may be unbusied by the implementation.  We must unbusy
	 * the upper mount for the same reason; otherwise a namei lookup
	 * issued by the VFS_QUOTACTL() implementation could traverse the
	 * upper mount and deadlock.
	 */
	vfs_unbusy(mp);
	*mp_busy = false;
	unbusy = true;
	error = vfs_busy(lowermp, 0);
	if (error == 0)
		error = VFS_QUOTACTL(lowermp, cmd, uid, arg, &unbusy);
	if (unbusy)
		vfs_unbusy(lowermp);

	return (error);
}

static int
nullfs_statfs(struct mount *mp, struct statfs *sbp)
{
	int error;
	struct statfs *mstat;

	NULLFSDEBUG("nullfs_statfs(mp = %p, vp = %p->%p)\n", (void *)mp,
	    (void *)MOUNTTONULLMOUNT(mp)->nullm_rootvp,
	    (void *)NULLVPTOLOWERVP(MOUNTTONULLMOUNT(mp)->nullm_rootvp));

	mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);

	error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
	sbp->f_type = mstat->f_type;
	sbp->f_flags = (sbp->f_flags & (MNT_RDONLY | MNT_NOEXEC | MNT_NOSUID |
	    MNT_UNION | MNT_NOSYMFOLLOW | MNT_AUTOMOUNTED)) |
	    (mstat->f_flags & ~(MNT_ROOTFS | MNT_AUTOMOUNTED));
	sbp->f_bsize = mstat->f_bsize;
	sbp->f_iosize = mstat->f_iosize;
	sbp->f_blocks = mstat->f_blocks;
	sbp->f_bfree = mstat->f_bfree;
	sbp->f_bavail = mstat->f_bavail;
	sbp->f_files = mstat->f_files;
	sbp->f_ffree = mstat->f_ffree;

	free(mstat, M_STATFS);
	return (0);
}

static int
nullfs_sync(struct mount *mp, int waitfor)
{
	/*
	 * XXX - Assumes no data cached at null layer.
	 */
	return (0);
}

static int
nullfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	int error;

	KASSERT((flags & LK_TYPE_MASK) != 0,
	    ("nullfs_vget: no lock requested"));

	error = VFS_VGET(MOUNTTONULLMOUNT(mp)->nullm_vfs, ino, flags, vpp);
	if (error != 0)
		return (error);
	return (null_nodeget(mp, *vpp, vpp));
}

static int
nullfs_fhtovp(struct mount *mp, struct fid *fidp, int flags, struct vnode **vpp)
{
	int error;

	error = VFS_FHTOVP(MOUNTTONULLMOUNT(mp)->nullm_vfs, fidp, flags,
	    vpp);
	if (error != 0)
		return (error);
	return (null_nodeget(mp, *vpp, vpp));
}

static int
nullfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int namespace, const char *attrname)
{

	return (VFS_EXTATTRCTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd,
	    filename_vp, namespace, attrname));
}

static void
nullfs_reclaim_lowervp(struct mount *mp, struct vnode *lowervp)
{
	struct vnode *vp;

	vp = null_hashget(mp, lowervp);
	if (vp == NULL)
		return;
	VTONULL(vp)->null_flags |= NULLV_NOUNLOCK;
	vgone(vp);
	vput(vp);
}

static void
nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp)
{
	struct vnode *vp;
	struct null_node *xp;

	vp = null_hashget(mp, lowervp);
	if (vp == NULL)
		return;
	xp = VTONULL(vp);
	xp->null_flags |= NULLV_DROP | NULLV_NOUNLOCK;
	vhold(vp);
	vunref(vp);

	if (vp->v_usecount == 0) {
		/*
		 * If vunref() dropped the last use reference on the
		 * nullfs vnode, it must be reclaimed, and its lock
		 * was split from the lower vnode lock.  Need to do
		 * extra unlock before allowing the final vdrop() to
		 * free the vnode.
		 */
		KASSERT(VN_IS_DOOMED(vp),
		    ("not reclaimed nullfs vnode %p", vp));
		VOP_UNLOCK(vp);
	} else {
		/*
		 * Otherwise, the nullfs vnode still shares the lock
		 * with the lower vnode, and must not be unlocked.
		 * Also clear the NULLV_NOUNLOCK, the flag is not
		 * relevant for future reclamations.
		 */
		ASSERT_VOP_ELOCKED(vp, "unlink_lowervp");
		KASSERT(!VN_IS_DOOMED(vp),
		    ("reclaimed nullfs vnode %p", vp));
		xp->null_flags &= ~NULLV_NOUNLOCK;
	}
	vdrop(vp);
}

static struct vfsops null_vfsops = {
	.vfs_extattrctl =	nullfs_extattrctl,
	.vfs_fhtovp =		nullfs_fhtovp,
	.vfs_init =		nullfs_init,
	.vfs_mount =		nullfs_mount,
	.vfs_quotactl =		nullfs_quotactl,
	.vfs_root =		nullfs_root,
	.vfs_statfs =		nullfs_statfs,
	.vfs_sync =		nullfs_sync,
	.vfs_uninit =		nullfs_uninit,
	.vfs_unmount =		nullfs_unmount,
	.vfs_vget =		nullfs_vget,
	.vfs_reclaim_lowervp =	nullfs_reclaim_lowervp,
	.vfs_unlink_lowervp =	nullfs_unlink_lowervp,
};

VFS_SET(null_vfsops, nullfs, VFCF_LOOPBACK | VFCF_JAIL | VFCF_FILEMOUNT);
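
/*
 * Usage note (illustrative; the path below is only an example): once the
 * filesystem is registered by VFS_SET() above, a nullfs instance is
 * typically created from userland with mount(8) or mount_nullfs(8), e.g.
 *
 *	mount -t nullfs /usr/ports /jail/usr/ports
 *
 * VFCF_LOOPBACK marks nullfs as a stacking (loopback) layer, VFCF_JAIL
 * permits mounting from within a jail when the jail's allow.mount.nullfs
 * parameter is enabled, and VFCF_FILEMOUNT additionally allows a regular
 * file to be mounted over a regular file, matching the VREG check in
 * nullfs_mount() above.
 */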