/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", NULL };

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	pid_t fsckpid = 0;
	int error, error1, flags;
	uint64_t mntorflags;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}
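	/*
	 * The "fsckpid" option hands a single fsck(8) process write
	 * access to an otherwise read-only mount.  A value of 0 from
	 * userland means "checker is done" and is recorded internally
	 * as -1, which triggers the return to normal read-only
	 * operation in the MNT_UPDATE path below.
	 */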
	if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
	    vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
		/*
		 * Once we have set the restricted PID, do not
		 * persist "fsckpid" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "fsckpid");
		vfs_deleteopt(mp->mnt_opt, "fsckpid");
		if (mp->mnt_flag & MNT_UPDATE) {
			if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
			    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
				vfs_mount_error(mp,
				    "Checker enable: Must be read-only");
				return (EINVAL);
			}
		} else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
			vfs_mount_error(mp,
			    "Checker enable: Must be read-only");
			return (EINVAL);
		}
		/* Set to -1 if we are done */
		if (fsckpid == 0)
			fsckpid = -1;
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		if (fsckpid == -1 && ump->um_fsckpid > 0) {
			if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
			    (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
				return (error);
			g_topology_lock();
			/*
			 * Return to normal read-only mode.
			 */
			error = g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			ump->um_fsckpid = 0;
		}
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (MOUNTEDSOFTDEP(mp)) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (MOUNTEDSOFTDEP(mp))
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			fs->fs_ronly = 1;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, td, 0)) != 0)
			return (error);
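		/*
		 * Note on the g_access() calls in this function: the
		 * three deltas adjust the GEOM consumer's read, write,
		 * and exclusive access counts.  A read-only mount holds
		 * (1, 0, 0), a read/write mount holds (1, 1, 1), and an
		 * active checker holds an additional write reference
		 * without exclusivity, hence the (0, 1, 0)/(0, -1, 0)
		 * pairs used for fsck and the (0, 1, 1)/(0, -1, -1)
		 * pairs used for rw upgrade and downgrade.
		 */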
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If we are running a checker, do not allow upgrade.
			 */
			if (ump->um_fsckpid > 0) {
				vfs_mount_error(mp,
				    "Active checker, cannot upgrade to write");
				return (EINVAL);
			}
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    fs->fs_fsmnt,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				vn_finished_write(mp);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				vn_finished_write(mp);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vn_finished_write(mp);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
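		/*
		 * Checker activation below takes an additional plain
		 * write reference on the GEOM consumer (no exclusive
		 * bit) and marks the superblock dirty; snapshots are
		 * mounted so that a background fsck can use them.
		 * ump->um_fsckpid records the single process allowed
		 * to perform the repair.
		 */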
		/*
		 * If this is a request from fsck to clean up the filesystem,
		 * then allow the specified pid to proceed.
		 */
		if (fsckpid > 0) {
			if (ump->um_fsckpid != 0) {
				vfs_mount_error(mp,
				    "Active checker already running on %s",
				    fs->fs_fsmnt);
				return (EINVAL);
			}
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				vfs_mount_error(mp,
				    "Checker activation failed on %s",
				    fs->fs_fsmnt);
				return (error);
			}
			ump->um_fsckpid = fsckpid;
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			fs->fs_mtime = time_second;
			fs->fs_fmod = 1;
			fs->fs_clean = 0;
			(void) ffs_sbupdate(ump, MNT_WAIT, 0);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));

		/*
		 * Must not call namei() while owning busy ref.
		 */
		vfs_unbusy(mp);
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount_alloc() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
		if (fsckpid > 0) {
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				printf("WARNING: %s: Checker activation "
				    "failed\n", fs->fs_fsmnt);
			} else {
				ump->um_fsckpid = fsckpid;
				if (fs->fs_snapinum[0] != 0)
					ffs_snapshot_mount(mp);
				fs->fs_mtime = time_second;
				fs->fs_clean = 0;
				(void) ffs_sbupdate(ump, MNT_WAIT, 0);
			}
		}
	}
	vfs_mountedfrom(mp, fspec);
	return (0);
}
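/*
 * ffs_cmount() below translates the pre-nmount(2) mount(2) ABI: the
 * old struct ufs_args is copied in from userland and re-expressed as
 * an nmount-style option list ("from" and "export") before being fed
 * back through kernel_mount().
 */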
/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	struct export_args exp;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);
	vfs_oexport_conv(&args.export, &exp);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &exp, sizeof(exp));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, struct thread *td, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp, 0);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (e.g., fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_active = fs->fs_active;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
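	/*
	 * The summary area reloaded below holds the per-cylinder-group
	 * csum totals (fs_cssize bytes), optionally followed by the
	 * in-core cluster summary array (one int32_t per cg when
	 * fs_contigsumsize > 0) and the contigdirs array (one u_int8_t
	 * per cg), all carved out of a single malloc'd region hung off
	 * fs_csp.
	 */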
	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			vrele(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ffs_load_inode(bp, ip, fs, ip->i_number);
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		VOP_UNLOCK(vp, 0);
		vrele(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(devvp, mp, td)
	struct vnode *devvp;
	struct mount *mp;
	struct thread *td;
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		VOP_UNLOCK(devvp, 0);
		return (EBUSY);
	}
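	/*
	 * The atomic compare-and-set above claims the device for this
	 * mount by publishing mp in dev->si_mountpt; a second mount
	 * attempt on the same device fails with EBUSY.  Now attach a
	 * GEOM consumer to the device.  A read-only mount asks only
	 * for read access; a read/write mount also asks for write and
	 * exclusive access.
	 */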
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		VOP_UNLOCK(devvp, 0);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	VOP_UNLOCK(devvp, 0);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	if ((error = ffs_sbget(devvp, &fs, -1, M_UFSMNT, ffs_use_bread)) != 0)
		goto out;
	fs->fs_fmod = 0;
	/* if we ran on a kernel without metadata check hashes, disable them */
	if ((fs->fs_flags & FS_METACKHASH) == 0)
		fs->fs_metackhash = 0;
	/* none of these types of check-hashes are maintained by this kernel */
	fs->fs_metackhash &= ~(CK_SUPERBLOCK | CK_INODE | CK_INDIR | CK_DIR);
	/* no support for any undefined flags */
	fs->fs_flags &= FS_SUPPORTED;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
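	/*
	 * If the superblock advertises gjournal, ask the GEOM stack
	 * below us for the journal provider's name via the
	 * "GJOURNAL::provider" attribute; the flag is honored only
	 * when a gjournal class actually answers.
	 */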
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			printf("WARNING: %s: GJOURNAL flag on fs "
			    "but no gjournal provider below\n",
			    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
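	/*
	 * TRIM support is probed with the "GEOM::candelete" attribute:
	 * the FS_TRIM flag in the superblock only enables issuing
	 * BIO_DELETE when the underlying provider confirms it can
	 * handle it.  Deletes are then queued to a dedicated taskqueue
	 * so that block-freeing threads need not wait on the device.
	 */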
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &ump->um_candelete) == 0) {
			if (!ump->um_candelete)
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
			ump->um_candelete = 0;
		}
		if (ump->um_candelete) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
		}
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 *
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 *	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump) {
		mtx_destroy(UFS_MTX(ump));
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	dev_rel(dev);
	return (error);
}
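/*
 * ffs_use_bread() below is handed to ffs_sbget() as its read
 * callback: "devfd" is really the device vnode, and the routine
 * returns a freshly malloc'd copy of the data (freed by the caller
 * with M_UFSMNT) while marking the underlying buffer B_INVAL |
 * B_NOCACHE so a superblock-sized buffer does not linger in the
 * buffer cache.
 */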
/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

#include <sys/sysctl.h>
static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(fs, ump, sblockloc)
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}
/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(fs, ump)
	struct fs *fs;
	struct ufsmount *ump;
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(mp, mntflags)
	struct mount *mp;
	int mntflags;
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	susp = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		susp = fs->fs_ronly == 0;
	}
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && error != ENXIO)
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (error && error != ENXIO) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
		taskqueue_free(ump->um_trim_tq);
	}
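	/*
	 * All in-flight TRIM requests have been drained above, so it
	 * is now safe to drop our GEOM access counts and close the
	 * consumer that the trim taskqueue was issuing BIO_DELETEs
	 * through.
	 */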
	g_topology_lock();
	if (ump->um_fsckpid > 0) {
		/*
		 * Return to normal read-only mode.
		 */
		error = g_access(ump->um_cp, 0, -1, 0);
		ump->um_fsckpid = 0;
	}
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	vrele(ump->um_devvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}
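/*
 * In ffs_statfs() below, free space is accounted in fragments:
 * f_bfree counts whole free blocks (cs_nbfree * fs_frag) plus free
 * fragments plus blocks pending release, while f_bavail further
 * applies the fs_minfree reserve via freespace(), so it can go
 * negative once the reserve has been consumed.
 */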
/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}

static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(mp)
	struct mount *mp;
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	struct thread *td;
	int allerror, error;

	allerror = 0;
	td = curthread;
	if ((mp->mnt_flag & MNT_NOATIME) != 0)
		goto qupdate;
	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
		    td)) != 0)
			continue;
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}

qupdate:
#ifdef QUOTA
	qsync(mp);
#endif

	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}
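/*
 * In ffs_sync() below, MNT_SUSPEND is handled as a MNT_WAIT sync that
 * additionally snapshots the mount's secondary-write and soft-updates
 * dependency counters before the vnode scan; if
 * softdep_check_suspend() sees that either changed, new work arrived
 * during the scan and the whole pass is retried before the filesystem
 * is marked suspended.
 */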
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}
#ifdef QUOTA
	qsync(mp);
#endif

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp, 0);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
		    devvp,
		    softdep_deps,
		    softdep_accdeps,
		    secondary_writes,
		    secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	return (allerror);
}

int
ffs_vget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (ffs_vgetf(mp, ino, flags, vpp, 0));
}

int
ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	int error;

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such rare case as simultaneous creation of vnode
	 * for same ino by different processes.  We just allow them to race
	 * and check later to decide who wins.  Let the race begin!
	 */

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
	if (error) {
		*vpp = NULL;
		uma_zfree(uma_inode, ip);
		return (error);
	}
	/*
	 * FFS supports recursive locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	VN_LOCK_AREC(vp);
	vp->v_data = ip;
	vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
	ip->i_nextclustercg = -1;
	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
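	/*
	 * The IN_UFS2 flag just set records the on-disk dinode format
	 * in the in-core inode, so the I_IS_UFS1()/I_IS_UFS2() tests
	 * used below do not have to consult the superblock again.
	 */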
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	if (ffs_flags & FFSV_FORCEINSMQ)
		vp->v_vflag |= VV_FORCEINSMQ;
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree(uma_inode, ip);
		*vpp = NULL;
		return (error);
	}
	vp->v_vflag &= ~VV_FORCEINSMQ;
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (I_IS_UFS1(ip))
		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
	else
		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
	ffs_load_inode(bp, ip, fs, ino);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
	    &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		while (ip->i_gen == 0)
			ip->i_gen = arc4random();
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			ip->i_flag |= IN_MODIFIED;
			DIP_SET(ip, i_gen, ip->i_gen);
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vput(vp);
			*vpp = NULL;
			return (error);
		}
	}
#endif

	*vpp = vp;
	return (0);
}
/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{
	struct ufid *ufhp;
	struct ufsmount *ump;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	ino_t ino;
	u_int cg;
	int error;

	ufhp = (struct ufid *)fhp;
	ino = ufhp->ufid_ino;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	/*
	 * Need to check if inode is initialized because UFS2 does lazy
	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
	 */
	if (fs->fs_magic != FS_UFS2_MAGIC)
		return (ufs_fhtovp(mp, ufhp, flags, vpp));
	cg = ino_to_cg(fs, ino);
	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0)
		return (error);
	if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
		brelse(bp);
		return (ESTALE);
	}
	brelse(bp);
	return (ufs_fhtovp(mp, ufhp, flags, vpp));
}

/*
 * Initialize the filesystem.
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(vfsp)
	struct vfsconf *vfsp;
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	return (ret);
}

/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount	*ump;
	struct buf	*sbbp;
	int		 waitfor;
	int		 suspended;
	int		 error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(ump, waitfor, suspended)
	struct ufsmount *ump;
	int waitfor;
	int suspended;
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}
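/*
 * ffs_sbput() calls ffs_use_bwrite() back once for each block of
 * summary information and finally for the superblock itself.  Any
 * error from the summary writes is latched in devfd.error, which
 * makes the final superblock write bail out rather than publishing a
 * superblock whose summary blocks may not have made it to disk.
 */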
/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself. We need to do special checks for it.
	 */
	bp = devfdp->sbbp;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
	int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * We should mark the cylinder group buffer origbp as
	 * dirty, to not lose the failed write.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
        struct bufobj *bufobj;
        struct buf *origbp;

        /*
         * Find the original buffer that we are writing.
         */
        bufobj = bp->b_bufobj;
        BO_LOCK(bufobj);
        if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
                panic("backgroundwritedone: lost buffer");

        /*
         * We should mark the cylinder group buffer origbp as dirty,
         * so as not to lose the failed write.
         */
        if ((bp->b_ioflags & BIO_ERROR) != 0)
                origbp->b_vflags |= BV_BKGRDERR;
        BO_UNLOCK(bufobj);
        /*
         * Process dependencies then return any unfinished ones.
         */
        if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
                buf_complete(bp);
#ifdef SOFTUPDATES
        if (!LIST_EMPTY(&bp->b_dep))
                softdep_move_dependencies(bp, origbp);
#endif
        /*
         * This buffer is marked B_NOCACHE so when it is released
         * by biodone it will be tossed.
         */
        bp->b_flags |= B_NOCACHE;
        bp->b_flags &= ~B_CACHE;
        pbrelvp(bp);

        /*
         * Prevent brelse() from trying to keep and re-dirty bp on
         * error.  That would cause a b_bufobj dereference in
         * bdirty()/reassignbuf(), and b_bufobj was cleared in
         * pbrelvp() above.
         */
        if ((bp->b_ioflags & BIO_ERROR) != 0)
                bp->b_flags |= B_INVAL;
        bufdone(bp);
        BO_LOCK(bufobj);
        /*
         * Clear the BV_BKGRDINPROG flag in the original buffer
         * and awaken it if it is waiting for the write to complete.
         * If BV_BKGRDINPROG is not set in the original buffer it must
         * have been released and re-instantiated, which is not legal.
         */
        KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
            ("backgroundwritedone: lost buffer2"));
        origbp->b_vflags &= ~BV_BKGRDINPROG;
        if (origbp->b_vflags & BV_BKGRDWAIT) {
                origbp->b_vflags &= ~BV_BKGRDWAIT;
                wakeup(&origbp->b_xflags);
        }
        BO_UNLOCK(bufobj);
}
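/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): how a thread could wait out a pending background write,
 * using the same BV_BKGRDINPROG/BV_BKGRDWAIT handshake that
 * ffs_backgroundwritedone() above completes and that ffs_bufwrite()
 * below relies on.  The function name and the EXAMPLE_ONLY guard are
 * hypothetical; the buffer is assumed to be locked by the caller, as
 * in ffs_bufwrite().
 */
#ifdef EXAMPLE_ONLY
static void
example_wait_for_bkgrdwrite(struct buf *bp)
{

        BO_LOCK(bp->b_bufobj);
        while (bp->b_vflags & BV_BKGRDINPROG) {
                /* Ask the completion handler to wake us. */
                bp->b_vflags |= BV_BKGRDWAIT;
                msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
                    "bwrbg", 0);
        }
        BO_UNLOCK(bp->b_bufobj);
}
#endif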
/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
        struct buf *newbp;
        struct cg *cgp;

        CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
        if (bp->b_flags & B_INVAL) {
                brelse(bp);
                return (0);
        }

        if (!BUF_ISLOCKED(bp))
                panic("bufwrite: buffer is not busy???");
        /*
         * If a background write is already in progress, delay
         * writing this block if it is asynchronous.  Otherwise
         * wait for the background write to complete.
         */
        BO_LOCK(bp->b_bufobj);
        if (bp->b_vflags & BV_BKGRDINPROG) {
                if (bp->b_flags & B_ASYNC) {
                        BO_UNLOCK(bp->b_bufobj);
                        bdwrite(bp);
                        return (0);
                }
                bp->b_vflags |= BV_BKGRDWAIT;
                msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
                    "bwrbg", 0);
                if (bp->b_vflags & BV_BKGRDINPROG)
                        panic("bufwrite: still writing");
        }
        bp->b_vflags &= ~BV_BKGRDERR;
        BO_UNLOCK(bp->b_bufobj);

        /*
         * If this buffer is marked for background writing and we
         * do not have to wait for it, make a copy and write the
         * copy so as to leave this buffer ready for further use.
         *
         * This optimization eats a lot of memory.  If we have a page
         * or buffer shortfall we can't do it.
         */
        if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
            (bp->b_flags & B_ASYNC) &&
            !vm_page_count_severe() &&
            !buf_dirty_count_severe()) {
                KASSERT(bp->b_iodone == NULL,
                    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

                /* get a new block */
                newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
                if (newbp == NULL)
                        goto normal_write;

                KASSERT(buf_mapped(bp), ("Unmapped cg"));
                memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
                BO_LOCK(bp->b_bufobj);
                bp->b_vflags |= BV_BKGRDINPROG;
                BO_UNLOCK(bp->b_bufobj);
                newbp->b_xflags |=
                    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
                newbp->b_lblkno = bp->b_lblkno;
                newbp->b_blkno = bp->b_blkno;
                newbp->b_offset = bp->b_offset;
                newbp->b_iodone = ffs_backgroundwritedone;
                newbp->b_flags |= B_ASYNC;
                newbp->b_flags &= ~B_INVAL;
                pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
                /*
                 * Move over the dependencies.  If there are rollbacks,
                 * leave the parent buffer dirtied as it will need to
                 * be written again.
                 */
                if (LIST_EMPTY(&bp->b_dep) ||
                    softdep_move_dependencies(bp, newbp) == 0)
                        bundirty(bp);
#else
                bundirty(bp);
#endif

                /*
                 * Initiate write on the copy, release the original.  The
                 * BKGRDINPROG flag prevents it from going away until
                 * the background write completes.  We have to recalculate
                 * its check hash in case the buffer gets freed and then
                 * reconstituted from the buffer cache during a later read.
                 */
                if ((bp->b_xflags & BX_CYLGRP) != 0) {
                        cgp = (struct cg *)bp->b_data;
                        cgp->cg_ckhash = 0;
                        cgp->cg_ckhash =
                            calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
                }
                bqrelse(bp);
                bp = newbp;
        } else
                /* Mark the buffer clean */
                bundirty(bp);

        /* Let the normal bufwrite do the rest for us */
normal_write:
        /*
         * If we are writing a cylinder group, update its time.
         */
        if ((bp->b_xflags & BX_CYLGRP) != 0) {
                cgp = (struct cg *)bp->b_data;
                cgp->cg_old_time = cgp->cg_time = time_second;
        }
        return (bufwrite(bp));
}
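/*
 * Illustrative helper (editor's addition, not part of the original
 * source): recompute a cylinder group buffer's check hash the way
 * ffs_bufwrite() above and ffs_geom_strategy() below do, zeroing the
 * stored hash first so the hash field itself is not folded into the
 * new value.  The function name and the EXAMPLE_ONLY guard are
 * hypothetical.
 */
#ifdef EXAMPLE_ONLY
static void
example_cg_rehash(struct buf *bp)
{
        struct cg *cgp;

        cgp = (struct cg *)bp->b_data;
        cgp->cg_ckhash = 0;
        cgp->cg_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
}
#endif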
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
        struct vnode *vp;
        struct buf *tbp;
        int error, nocopy;

        vp = bo2vnode(bo);
        if (bp->b_iocmd == BIO_WRITE) {
                if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
                    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
                    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
                        panic("ffs_geom_strategy: bad I/O");
                nocopy = bp->b_flags & B_NOCOPY;
                bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
                if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
                    vp->v_rdev->si_snapdata != NULL) {
                        if ((bp->b_flags & B_CLUSTER) != 0) {
                                runningbufwakeup(bp);
                                TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
                                    b_cluster.cluster_entry) {
                                        error = ffs_copyonwrite(vp, tbp);
                                        if (error != 0 &&
                                            error != EOPNOTSUPP) {
                                                bp->b_error = error;
                                                bp->b_ioflags |= BIO_ERROR;
                                                bufdone(bp);
                                                return;
                                        }
                                }
                                bp->b_runningbufspace = bp->b_bufsize;
                                atomic_add_long(&runningbufspace,
                                    bp->b_runningbufspace);
                        } else {
                                error = ffs_copyonwrite(vp, bp);
                                if (error != 0 && error != EOPNOTSUPP) {
                                        bp->b_error = error;
                                        bp->b_ioflags |= BIO_ERROR;
                                        bufdone(bp);
                                        return;
                                }
                        }
                }
#ifdef SOFTUPDATES
                if ((bp->b_flags & B_CLUSTER) != 0) {
                        TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
                            b_cluster.cluster_entry) {
                                if (!LIST_EMPTY(&tbp->b_dep))
                                        buf_start(tbp);
                        }
                } else {
                        if (!LIST_EMPTY(&bp->b_dep))
                                buf_start(bp);
                }
#endif
                /*
                 * Check for metadata that needs check-hashes and update them.
                 */
                switch (bp->b_xflags & BX_FSPRIV) {
                case BX_CYLGRP:
                        ((struct cg *)bp->b_data)->cg_ckhash = 0;
                        ((struct cg *)bp->b_data)->cg_ckhash =
                            calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
                        break;

                case BX_SUPERBLOCK:
                case BX_INODE:
                case BX_INDIR:
                case BX_DIR:
                        printf("Check-hash write is unimplemented!!!\n");
                        break;

                case 0:
                        break;

                default:
                        printf("multiple buffer types 0x%b\n",
                            (u_int)(bp->b_xflags & BX_FSPRIV),
                            PRINT_UFS_BUF_XFLAGS);
                        break;
                }
        }
        g_vfs_strategy(bo, bp);
}

int
ffs_own_mount(const struct mount *mp)
{

        if (mp->mnt_op == &ufs_vfsops)
                return (1);
        return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
        struct mount *mp;
        struct ufsmount *ump;

        if (have_addr) {
                ump = VFSTOUFS((struct mount *)addr);
                db_print_ffs(ump);
                return;
        }

        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
                        db_print_ffs(VFSTOUFS(mp));
        }
}

#endif /* SOFTUPDATES */
#endif /* DDB */
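/*
 * Usage note (editor's illustration, not part of the original
 * source): when the kernel is built with DDB and SOFTUPDATES, the
 * command registered above can be invoked from the debugger prompt
 * as
 *
 *      db> show ffs
 *
 * to dump every mounted UFS filesystem, or with the address of a
 * specific struct mount to dump just that one.
 */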