/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	pid_t fsckpid = 0;
	int error, error1, flags;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
	    vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
		/*
		 * Once we have set the restricted PID, do not
		 * persist "fsckpid" in the options list.
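		 * ("fsckpid" is used by fsck(8) to be granted write access to
		 * a volume that stays mounted read-only; passing 0 shuts the
		 * checker back down, which is recorded as -1 below.)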
		 */
		vfs_deleteopt(mp->mnt_optnew, "fsckpid");
		vfs_deleteopt(mp->mnt_opt, "fsckpid");
		if (mp->mnt_flag & MNT_UPDATE) {
			if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
			    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
				vfs_mount_error(mp,
				    "Checker enable: Must be read-only");
				return (EINVAL);
			}
		} else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
			vfs_mount_error(mp,
			    "Checker enable: Must be read-only");
			return (EINVAL);
		}
		/* Set to -1 if we are done */
		if (fsckpid == 0)
			fsckpid = -1;
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		if (fsckpid == -1 && ump->um_fsckpid > 0) {
			if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
			    (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
				return (error);
			g_topology_lock();
			/*
			 * Return to normal read-only mode.
			 */
			error = g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			ump->um_fsckpid = 0;
		}
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (MOUNTEDSOFTDEP(mp)) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (MOUNTEDSOFTDEP(mp))
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			fs->fs_ronly = 1;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, td, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If we are running a checker, do not allow upgrade.
			 */
			if (ump->um_fsckpid > 0) {
				vfs_mount_error(mp,
				    "Active checker, cannot upgrade to write");
				return (EINVAL);
			}
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp);
				return (error);
			}
			VOP_UNLOCK(devvp);
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    fs->fs_fsmnt,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
		/*
		 * If this is a request from fsck to clean up the filesystem,
		 * then allow the specified pid to proceed.
		 */
		if (fsckpid > 0) {
			if (ump->um_fsckpid != 0) {
				vfs_mount_error(mp,
				    "Active checker already running on %s",
				    fs->fs_fsmnt);
				return (EINVAL);
			}
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			g_topology_lock();
			/*
			 * Request write access.
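			 * g_access() takes (read, write, exclusive) reference
			 * deltas, so this grants a single write reference on
			 * the GEOM consumer for the checker while the
			 * filesystem itself remains mounted read-only.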
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				vfs_mount_error(mp,
				    "Checker activation failed on %s",
				    fs->fs_fsmnt);
				return (error);
			}
			ump->um_fsckpid = fsckpid;
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			fs->fs_mtime = time_second;
			fs->fs_fmod = 1;
			fs->fs_clean = 0;
			(void) ffs_sbupdate(ump, MNT_WAIT, 0);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));

		/*
		 * Must not call namei() while owning busy ref.
		 */
		vfs_unbusy(mp);
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device,
		 * then it's not correct.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount_alloc() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
		if (fsckpid > 0) {
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				printf("WARNING: %s: Checker activation "
				    "failed\n", fs->fs_fsmnt);
			} else {
				ump->um_fsckpid = fsckpid;
				if (fs->fs_snapinum[0] != 0)
					ffs_snapshot_mount(mp);
				fs->fs_mtime = time_second;
				fs->fs_clean = 0;
				(void) ffs_sbupdate(ump, MNT_WAIT, 0);
			}
		}
	}
	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	struct export_args exp;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);
	vfs_oexport_conv(&args.export, &exp);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &exp, sizeof(exp));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, struct thread *td, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_active = fs->fs_active;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
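	 * The summary area (fs_cssize bytes) is reallocated together with the
	 * optional per-cg cluster summary and the per-cg directory counts,
	 * then read back from fs_csaddr in full-sized blocks.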
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(devvp, mp, td)
	struct vnode *devvp;
	struct mount *mp;
	struct thread *td;
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	int candelete, canspeedup;
	off_t loc;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		VOP_UNLOCK(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		VOP_UNLOCK(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	VOP_UNLOCK(devvp);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	loc = STDSB;
	if ((mp->mnt_flag & MNT_ROOTFS) != 0)
		loc = STDSB_NOHASHFAIL;
	if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0)
		goto out;
	/* none of these types of check-hashes are maintained by this kernel */
	fs->fs_metackhash &= ~(CK_INDIR | CK_DIR);
	/* no support for any undefined flags */
	fs->fs_flags &= FS_SUPPORTED;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
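		 * (Queried from the GEOM consumer with g_io_getattr(); if no
		 * gjournal provider answers, the GJOURNAL flag is ignored.)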
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			printf("WARNING: %s: GJOURNAL flag on fs "
			    "but no gjournal provider below\n",
			    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
		ump->um_check_blkno = ffs_check_blkno;
	else
		ump->um_check_blkno = NULL;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
		if (((ump->um_flags) & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * This will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 *
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 *	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump) {
		mtx_destroy(UFS_MTX(ump));
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
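 * ffs_sbget() calls this through the ffs_use_bread callback to read the
 * superblock and its summary information into malloc(9)ed memory; the
 * buffer-cache copy is marked B_INVAL | B_NOCACHE so it is not kept around.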
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

#include <sys/sysctl.h>
static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 *	 Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(fs, ump, sblockloc)
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 *	 Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(fs, ump)
	struct fs *fs;
	struct ufsmount *ump;
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(mp, mntflags)
	struct mount *mp;
	int mntflags;
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	susp = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		susp = fs->fs_ronly == 0;
	}
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && error != ENXIO)
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (error && error != ENXIO) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
		taskqueue_free(ump->um_trim_tq);
		free (ump->um_trimhash, M_TRIM);
	}
	g_topology_lock();
	if (ump->um_fsckpid > 0) {
		/*
		 * Return to normal read-only mode.
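		 * Drop the write reference that was granted when the checker
		 * was activated (g_access deltas are read, write, exclusive).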
		 */
		error = g_access(ump->um_cp, 0, -1, 0);
		ump->um_fsckpid = 0;
	}
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	vrele(ump->um_devvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots. The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing. Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
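 * f_bfree counts free blocks, free fragments and blocks pending release,
 * while f_bavail is computed with freespace() and so excludes the minfree
 * reserve; it can therefore be smaller than f_bfree.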
 */
static int
ffs_statfs(mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}

static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

static int
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
{
	struct inode *ip;

	/*
	 * Flags are safe to access because ->v_data invalidation
	 * is held off by listmtx.
	 */
	if (vp->v_type == VNON)
		return (false);
	ip = VTOI(vp);
	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
		return (false);
	return (true);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock. Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(mp)
	struct mount *mp;
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	struct thread *td;
	int allerror, error;

	allerror = 0;
	td = curthread;
	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
		qsync(mp);
#endif
		goto sbupdate;
	}
	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
		    td)) != 0)
			continue;
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
sbupdate:
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
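 *
 * MNT_LAZY requests are normally handed off to ffs_sync_lazy(); MNT_SUSPEND
 * behaves like MNT_WAIT but additionally checks, via softdep_check_suspend(),
 * that no secondary writes or soft dependency work arrived while syncing.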
 */
static int
ffs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
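	 * Soft updates work items flushed here may dirty further vnodes, so
	 * the loop above is retried while softdep_flushworklist() reports
	 * progress.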
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
		    devvp,
		    softdep_deps,
		    softdep_accdeps,
		    secondary_writes,
		    secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	return (allerror);
}

int
ffs_vget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (ffs_vgetf(mp, ino, flags, vpp, 0));
}

int
ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	int error;

	MPASS((ffs_flags & FFSV_REPLACE) == 0 || (flags & LK_EXCLUSIVE) != 0);

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0)
		return (error);
	if (*vpp != NULL) {
		if ((ffs_flags & FFSV_REPLACE) == 0)
			return (0);
		vgone(*vpp);
		vput(*vpp);
	}

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such rare case as simultaneous creation of vnode
	 * for same ino by different processes. We just allow them to race
	 * and check later to decide who wins. Let the race begin!
	 */

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
	if (error) {
		*vpp = NULL;
		uma_zfree(uma_inode, ip);
		return (error);
	}
	/*
	 * FFS supports recursive locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	VN_LOCK_AREC(vp);
	vp->v_data = ip;
	vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
	ip->i_nextclustercg = -1;
	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
	ip->i_mode = 0;	/* ensure error cases below throw away vnode */
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	if (ffs_flags & FFSV_FORCEINSMQ)
		vp->v_vflag |= VV_FORCEINSMQ;
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree(uma_inode, ip);
		*vpp = NULL;
		return (error);
	}
	vp->v_vflag &= ~VV_FORCEINSMQ;
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0)
		return (error);
	if (*vpp != NULL) {
		/*
		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
		 * operate on empty inode, which must not be found by
		 * other threads until fully filled.  Vnode for empty
		 * inode must be not re-inserted on the hash by other
		 * thread, after removal by us at the beginning.
		 */
		MPASS((ffs_flags & FFSV_REPLACE) == 0);
		return (0);
	}

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (I_IS_UFS1(ip))
		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
	else
		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
	if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
		bqrelse(bp);
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
	    &vp);
	if (error) {
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		while (ip->i_gen == 0)
			ip->i_gen = arc4random();
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
			DIP_SET(ip, i_gen, ip->i_gen);
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vgone(vp);
			vput(vp);
			*vpp = NULL;
			return (error);
		}
	}
#endif

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{
	struct ufid *ufhp;
	struct ufsmount *ump;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	ino_t ino;
	u_int cg;
	int error;

	ufhp = (struct ufid *)fhp;
	ino = ufhp->ufid_ino;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	/*
	 * Need to check if inode is initialized because UFS2 does lazy
	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
	 */
	if (fs->fs_magic != FS_UFS2_MAGIC)
		return (ufs_fhtovp(mp, ufhp, flags, vpp));
	cg = ino_to_cg(fs, ino);
	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0)
		return (error);
	if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
		brelse(bp);
		return (ESTALE);
	}
	brelse(bp);
	return (ufs_fhtovp(mp, ufhp, flags, vpp));
}

/*
 * Initialize the filesystem.
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(vfsp)
	struct vfsconf *vfsp;
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	return (ret);
}

/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount	*ump;
	struct buf	*sbbp;
	int		 waitfor;
	int		 suspended;
	int		 error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(ump, waitfor, suspended)
	struct ufsmount *ump;
	int waitfor;
	int suspended;
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}

/*
 * Write function for use by filesystem-layer routines.
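 * ffs_sbput() invokes this through the ffs_use_bwrite callback.  A location
 * other than fs_sblockloc is a summary-information block written through a
 * throwaway buffer; the superblock itself always goes through the
 * preallocated sbbp buffer so that concurrent updates stay serialized.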

/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself.  We need to do special checks for it.
	 */
	bp = devfdp->sbbp;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	fs = (struct fs *)bp->b_data;
	ffs_oldfscompat_write(fs, ump);
	/*
	 * Because we may have made changes to the superblock, we need to
	 * recompute its check-hash.
	 */
	fs->fs_ckhash = ffs_calc_sbhash(fs);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
	int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");
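
/*
 * The sysctl above exposes this knob as debug.dobkgrdwrite.  For
 * example, background writes can be disabled at runtime with:
 *
 *	sysctl debug.dobkgrdwrite=0
 */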

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * Mark the cylinder group buffer origbp dirty so that the
	 * failed write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies, then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so that when it is released
	 * by biodone it will be tossed.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~B_CACHE;
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirty bp on
	 * errors.  Doing so would cause a b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated, which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}
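
/*
 * In outline, the background-write handshake between ffs_bufwrite()
 * below and ffs_backgroundwritedone() above is:
 *
 *	ffs_bufwrite(bp):
 *		set BV_BKGRDINPROG on bp
 *		copy bp into newbp and write newbp asynchronously
 *	ffs_backgroundwritedone(newbp):
 *		move any unfinished dependencies back to bp
 *		clear BV_BKGRDINPROG and wake up sleepers on bp->b_xflags
 *
 * A later synchronous write of bp therefore sleeps with BV_BKGRDWAIT
 * set until the copy's I/O completes.
 */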

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now, so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous.  Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory.  If we have a page
	 * or buffer shortfall, we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies.  If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original.  The
		 * BV_BKGRDINPROG flag prevents it from going away until
		 * the background write completes.  We have to recalculate
		 * its check-hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}
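
/*
 * Check-hash note: a cylinder group's hash always covers the group
 * with its cg_ckhash field zeroed, so writers use the idiom seen in
 * ffs_bufwrite() above and in ffs_geom_strategy() below:
 *
 *	cgp->cg_ckhash = 0;
 *	cgp->cg_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
 */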

static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	vp = bo2vnode(bo);
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}
#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (u_int)(bp->b_xflags & BX_FSPRIV),
			    PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
	g_vfs_strategy(bo, bp);
}

int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif /* SOFTUPDATES */
#endif /* DDB */
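
/*
 * Example ddb(4) session (a sketch; the address shown is illustrative):
 * with DDB and SOFTUPDATES configured, the command above dumps either
 * every mounted FFS filesystem or a single one named by its struct
 * mount address, which is what the "have_addr" check distinguishes:
 *
 *	db> show ffs
 *	db> show ffs 0xfffff80003e68000
 */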