/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vfsops.c	8.2 (Berkeley) 1/21/94
 *
 *	@(#)lofs_vfsops.c	1.2 (Berkeley) 6/18/92
 */

/*
 * Null Layer
 * (See null_vnops.c for a description of what this does.)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>

#include <fs/nullfs/null.h>

static MALLOC_DEFINE(M_NULLFSMNT, "nullfs_mount", "NULLFS mount structure");

static vfs_fhtovp_t	nullfs_fhtovp;
static vfs_mount_t	nullfs_mount;
static vfs_quotactl_t	nullfs_quotactl;
static vfs_root_t	nullfs_root;
static vfs_sync_t	nullfs_sync;
static vfs_statfs_t	nullfs_statfs;
static vfs_unmount_t	nullfs_unmount;
static vfs_vget_t	nullfs_vget;
static vfs_extattrctl_t	nullfs_extattrctl;

/*
 * Mount null layer
 */
static int
nullfs_mount(struct mount *mp)
{
	struct vnode *lowerrootvp;
	struct vnode *nullm_rootvp;
	struct null_mount *xmp;
	struct null_node *nn;
	struct nameidata nd, *ndp;
	char *target;
	int error, len;
	bool isvnunlocked;

	NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);

	if (mp->mnt_flag & MNT_ROOTFS)
		return (EOPNOTSUPP);

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Only support update mounts for NFS export.
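		 * That is, an MNT_UPDATE request that includes export
		 * information (as issued when re-exporting an existing
		 * nullfs mount over NFS) is accepted; any other update
		 * request is rejected with EOPNOTSUPP.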
		 */
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0))
			return (0);
		else
			return (EOPNOTSUPP);
	}

	/*
	 * Get argument
	 */
	error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target, &len);
	if (error != 0)
		error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
	if (error || target[len - 1] != '\0')
		return (EINVAL);

	/*
	 * Unlock lower node to avoid possible deadlock.
	 */
	if (mp->mnt_vnodecovered->v_op == &null_vnodeops &&
	    VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
		VOP_UNLOCK(mp->mnt_vnodecovered);
		isvnunlocked = true;
	} else {
		isvnunlocked = false;
	}

	/*
	 * Find lower node
	 */
	ndp = &nd;
	NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target);
	error = namei(ndp);

	/*
	 * Re-lock vnode.
	 * XXXKIB This is deadlock-prone as well.
	 */
	if (isvnunlocked)
		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);

	if (error)
		return (error);
	NDFREE_PNBUF(ndp);

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = ndp->ni_vp;

	/*
	 * Check multi null mount to avoid `lock against myself' panic.
	 */
	if (mp->mnt_vnodecovered->v_op == &null_vnodeops) {
		nn = VTONULL(mp->mnt_vnodecovered);
		if (nn == NULL || lowerrootvp == nn->null_lowervp) {
			NULLFSDEBUG("nullfs_mount: multi null mount?\n");
			vput(lowerrootvp);
			return (EDEADLK);
		}
	}

	/*
	 * Lower vnode must be the same type as the covered vnode - we
	 * don't allow mounting directories to files or vice versa.
	 */
	if ((lowerrootvp->v_type != VDIR && lowerrootvp->v_type != VREG) ||
	    lowerrootvp->v_type != mp->mnt_vnodecovered->v_type) {
		NULLFSDEBUG("nullfs_mount: target must be same type as fspath");
		vput(lowerrootvp);
		return (EINVAL);
	}

	xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
	    M_NULLFSMNT, M_WAITOK | M_ZERO);

	/*
	 * Save pointer to underlying FS and the reference to the
	 * lower root vnode.
	 */
	xmp->nullm_vfs = vfs_register_upper_from_vp(lowerrootvp, mp,
	    &xmp->upper_node);
	if (xmp->nullm_vfs == NULL) {
		vput(lowerrootvp);
		free(xmp, M_NULLFSMNT);
		return (ENOENT);
	}
	vref(lowerrootvp);
	xmp->nullm_lowerrootvp = lowerrootvp;
	mp->mnt_data = xmp;

	/*
	 * Make sure the node alias worked.
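	 * null_nodeget() either finds an existing alias for lowerrootvp in
	 * the nullfs hash or allocates a fresh nullfs vnode stacked on top
	 * of it; on success nullm_rootvp comes back locked and referenced.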
	 */
	error = null_nodeget(mp, lowerrootvp, &nullm_rootvp);
	if (error != 0) {
		vfs_unregister_upper(xmp->nullm_vfs, &xmp->upper_node);
		vrele(lowerrootvp);
		free(xmp, M_NULLFSMNT);
		return (error);
	}

	if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_LOCAL;
		MNT_IUNLOCK(mp);
	}

	xmp->nullm_flags |= NULLM_CACHE;
	if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0 ||
	    (xmp->nullm_vfs->mnt_kern_flag & MNTK_NULL_NOCACHE) != 0)
		xmp->nullm_flags &= ~NULLM_CACHE;

	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		vfs_register_for_notification(xmp->nullm_vfs, mp,
		    &xmp->notify_node);
	}

	if (lowerrootvp == mp->mnt_vnodecovered) {
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
		lowerrootvp->v_vflag |= VV_CROSSLOCK;
		VOP_UNLOCK(lowerrootvp);
	}

	MNT_ILOCK(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
		    (MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
		    MNTK_EXTENDED_SHARED);
	}
	mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNLOCKED_INSMNTQUE;
	mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
	    (MNTK_USES_BCACHE | MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS);
	MNT_IUNLOCK(mp);
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, target);
	vput(nullm_rootvp);

	NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
	return (0);
}

/*
 * Free reference to null layer
 */
static int
nullfs_unmount(struct mount *mp, int mntflags)
{
	struct null_mount *mntdata;
	int error, flags;

	NULLFSDEBUG("nullfs_unmount: mp = %p\n", (void *)mp);

	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;

	for (;;) {
		/*
		 * Flush the nullfs vnodes.  If any remain, a non-forced
		 * unmount fails with EBUSY below, while a forced unmount
		 * retries until the mount's vnode list drains.
		 */
		error = vflush(mp, 0, flags, curthread);
		if (error)
			return (error);
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0)
			return (EBUSY);
	}

	/*
	 * Finally, throw away the null_mount structure
	 */
	mntdata = mp->mnt_data;
	if ((mntdata->nullm_flags & NULLM_CACHE) != 0) {
		vfs_unregister_for_notification(mntdata->nullm_vfs,
		    &mntdata->notify_node);
	}
	if (mntdata->nullm_lowerrootvp == mp->mnt_vnodecovered) {
		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
		mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK;
		VOP_UNLOCK(mp->mnt_vnodecovered);
	}
	vfs_unregister_upper(mntdata->nullm_vfs, &mntdata->upper_node);
	vrele(mntdata->nullm_lowerrootvp);
	mp->mnt_data = NULL;
	free(mntdata, M_NULLFSMNT);
	return (0);
}

static int
nullfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	struct null_mount *mntdata;
	int error;

	mntdata = MOUNTTONULLMOUNT(mp);
	NULLFSDEBUG("nullfs_root(mp = %p, vp = %p)\n", mp,
	    mntdata->nullm_lowerrootvp);

	error = vget(mntdata->nullm_lowerrootvp, flags);
	if (error == 0) {
		error = null_nodeget(mp, mntdata->nullm_lowerrootvp, &vp);
		if (error == 0) {
			*vpp = vp;
		}
	}
	return (error);
}

static int
nullfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg, bool *mp_busy)
{
	struct mount *lowermp;
	struct null_mount *mntdata;
	int error;
	bool unbusy;

	mntdata = MOUNTTONULLMOUNT(mp);
	lowermp = atomic_load_ptr(&mntdata->nullm_vfs);
	KASSERT(*mp_busy == true, ("upper mount not busy"));
	/*
	 * See comment in sys_quotactl() for an explanation of why the
	 * lower mount needs to be busied by the caller of VFS_QUOTACTL()
	 * but may be unbusied by the implementation.  We must unbusy
	 * the upper mount for the same reason; otherwise a namei lookup
	 * issued by the VFS_QUOTACTL() implementation could traverse the
	 * upper mount and deadlock.
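	 * This is also why lowermp was loaded above, while the caller's
	 * busy reference on the upper mount was still held.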
	 */
	vfs_unbusy(mp);
	*mp_busy = false;
	unbusy = true;
	error = vfs_busy(lowermp, 0);
	if (error == 0)
		error = VFS_QUOTACTL(lowermp, cmd, uid, arg, &unbusy);
	if (unbusy)
		vfs_unbusy(lowermp);

	return (error);
}

static int
nullfs_statfs(struct mount *mp, struct statfs *sbp)
{
	int error;
	struct statfs *mstat;

	NULLFSDEBUG("nullfs_statfs(mp = %p, vp = %p->%p)\n", (void *)mp,
	    (void *)MOUNTTONULLMOUNT(mp)->nullm_rootvp,
	    (void *)NULLVPTOLOWERVP(MOUNTTONULLMOUNT(mp)->nullm_rootvp));

	mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);

	error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
	sbp->f_type = mstat->f_type;
	sbp->f_flags = (sbp->f_flags & (MNT_RDONLY | MNT_NOEXEC | MNT_NOSUID |
	    MNT_UNION | MNT_NOSYMFOLLOW | MNT_AUTOMOUNTED | MNT_IGNORE)) |
	    (mstat->f_flags & ~(MNT_ROOTFS | MNT_AUTOMOUNTED));
	sbp->f_bsize = mstat->f_bsize;
	sbp->f_iosize = mstat->f_iosize;
	sbp->f_blocks = mstat->f_blocks;
	sbp->f_bfree = mstat->f_bfree;
	sbp->f_bavail = mstat->f_bavail;
	sbp->f_files = mstat->f_files;
	sbp->f_ffree = mstat->f_ffree;

	free(mstat, M_STATFS);
	return (0);
}

static int
nullfs_sync(struct mount *mp, int waitfor)
{
	/*
	 * XXX - Assumes no data cached at null layer.
	 */
	return (0);
}

static int
nullfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	int error;

	KASSERT((flags & LK_TYPE_MASK) != 0,
	    ("nullfs_vget: no lock requested"));

	error = VFS_VGET(MOUNTTONULLMOUNT(mp)->nullm_vfs, ino, flags, vpp);
	if (error != 0)
		return (error);
	return (null_nodeget(mp, *vpp, vpp));
}

static int
nullfs_fhtovp(struct mount *mp, struct fid *fidp, int flags, struct vnode **vpp)
{
	int error;

	error = VFS_FHTOVP(MOUNTTONULLMOUNT(mp)->nullm_vfs, fidp, flags,
	    vpp);
	if (error != 0)
		return (error);
	return (null_nodeget(mp, *vpp, vpp));
}

static int
nullfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int namespace, const char *attrname)
{

	return (VFS_EXTATTRCTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd,
	    filename_vp, namespace, attrname));
}

static void
nullfs_reclaim_lowervp(struct mount *mp, struct vnode *lowervp)
{
	struct vnode *vp;

	vp = null_hashget(mp, lowervp);
	if (vp == NULL)
		return;
	VTONULL(vp)->null_flags |= NULLV_NOUNLOCK;
	vgone(vp);
	vput(vp);
}

static void
nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp)
{
	struct vnode *vp;
	struct null_node *xp;

	vp = null_hashget(mp, lowervp);
	if (vp == NULL)
		return;
	xp = VTONULL(vp);
	xp->null_flags |= NULLV_DROP | NULLV_NOUNLOCK;
	vhold(vp);
	vunref(vp);

	if (vp->v_usecount == 0) {
		/*
		 * If vunref() dropped the last use reference on the
		 * nullfs vnode, it must be reclaimed, and its lock
		 * was split from the lower vnode lock.  Need to do
		 * extra unlock before allowing the final vdrop() to
		 * free the vnode.
		 */
		KASSERT(VN_IS_DOOMED(vp),
		    ("not reclaimed nullfs vnode %p", vp));
		VOP_UNLOCK(vp);
	} else {
		/*
		 * Otherwise, the nullfs vnode still shares the lock
		 * with the lower vnode, and must not be unlocked.
		 * Also clear NULLV_NOUNLOCK; the flag is not
		 * relevant for future reclamations.
		 */
		ASSERT_VOP_ELOCKED(vp, "unlink_lowervp");
		KASSERT(!VN_IS_DOOMED(vp),
		    ("reclaimed nullfs vnode %p", vp));
		xp->null_flags &= ~NULLV_NOUNLOCK;
	}
	vdrop(vp);
}

static struct vfsops null_vfsops = {
	.vfs_extattrctl =	nullfs_extattrctl,
	.vfs_fhtovp =		nullfs_fhtovp,
	.vfs_init =		nullfs_init,
	.vfs_mount =		nullfs_mount,
	.vfs_quotactl =		nullfs_quotactl,
	.vfs_root =		nullfs_root,
	.vfs_statfs =		nullfs_statfs,
	.vfs_sync =		nullfs_sync,
	.vfs_uninit =		nullfs_uninit,
	.vfs_unmount =		nullfs_unmount,
	.vfs_vget =		nullfs_vget,
	.vfs_reclaim_lowervp =	nullfs_reclaim_lowervp,
	.vfs_unlink_lowervp =	nullfs_unlink_lowervp,
};

VFS_SET(null_vfsops, nullfs, VFCF_LOOPBACK | VFCF_JAIL | VFCF_FILEMOUNT);
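
/*
 * Illustrative userland usage (a sketch, not part of the interface defined
 * in this file; see mount_nullfs(8)):
 *
 *	mount -t nullfs /usr/ports /jail/ports
 *	mount -t nullfs -o nocache /usr/src /mnt/src
 *
 * Such invocations hand the lower path to nullfs_mount() above through the
 * "target"/"from" nmount(2) options, and "nocache" clears NULLM_CACHE.
 */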