1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1991, 1993, 1994 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_quota.h" 38 #include "opt_ufs.h" 39 #include "opt_ffs.h" 40 #include "opt_ddb.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/namei.h> 45 #include <sys/priv.h> 46 #include <sys/proc.h> 47 #include <sys/taskqueue.h> 48 #include <sys/kernel.h> 49 #include <sys/vnode.h> 50 #include <sys/mount.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/conf.h> 54 #include <sys/fcntl.h> 55 #include <sys/ioccom.h> 56 #include <sys/malloc.h> 57 #include <sys/mutex.h> 58 #include <sys/rwlock.h> 59 #include <sys/vmmeter.h> 60 61 #include <security/mac/mac_framework.h> 62 63 #include <ufs/ufs/dir.h> 64 #include <ufs/ufs/extattr.h> 65 #include <ufs/ufs/gjournal.h> 66 #include <ufs/ufs/quota.h> 67 #include <ufs/ufs/ufsmount.h> 68 #include <ufs/ufs/inode.h> 69 #include <ufs/ufs/ufs_extern.h> 70 71 #include <ufs/ffs/fs.h> 72 #include <ufs/ffs/ffs_extern.h> 73 74 #include <vm/vm.h> 75 #include <vm/uma.h> 76 #include <vm/vm_page.h> 77 78 #include <geom/geom.h> 79 #include <geom/geom_vfs.h> 80 81 #include <ddb/ddb.h> 82 83 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2; 84 85 static int ffs_mountfs(struct vnode *, struct mount *, struct thread *); 86 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, 87 ufs2_daddr_t); 88 static void ffs_ifree(struct ufsmount *ump, struct inode *ip); 89 static int ffs_sync_lazy(struct mount *mp); 90 static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size); 91 static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size); 92 93 static vfs_init_t ffs_init; 94 static vfs_uninit_t ffs_uninit; 95 static vfs_extattrctl_t ffs_extattrctl; 96 static vfs_cmount_t ffs_cmount; 97 static vfs_unmount_t ffs_unmount; 98 static vfs_mount_t ffs_mount; 99 static vfs_statfs_t ffs_statfs; 100 static vfs_fhtovp_t ffs_fhtovp; 101 static vfs_sync_t ffs_sync; 102 103 static struct vfsops ufs_vfsops = { 104 .vfs_extattrctl = ffs_extattrctl, 105 .vfs_fhtovp = ffs_fhtovp, 106 .vfs_init = ffs_init, 107 .vfs_mount = ffs_mount, 108 .vfs_cmount = ffs_cmount, 109 .vfs_quotactl = ufs_quotactl, 110 .vfs_root = ufs_root, 111 .vfs_statfs = ffs_statfs, 112 .vfs_sync = ffs_sync, 113 .vfs_uninit = ffs_uninit, 114 .vfs_unmount = ffs_unmount, 115 .vfs_vget = ffs_vget, 116 .vfs_susp_clean = process_deferred_inactive, 117 }; 118 119 VFS_SET(ufs_vfsops, ufs, 0); 120 MODULE_VERSION(ufs, 1); 121 122 static b_strategy_t ffs_geom_strategy; 123 static b_write_t ffs_bufwrite; 124 125 static struct buf_ops ffs_ops = { 126 .bop_name = "FFS", 127 .bop_write = ffs_bufwrite, 128 .bop_strategy = ffs_geom_strategy, 129 .bop_sync = bufsync, 130 #ifdef NO_FFS_SNAPSHOT 131 .bop_bdflush = bufbdflush, 132 #else 133 .bop_bdflush = ffs_bdflush, 134 #endif 135 }; 136 137 /* 138 * Note that userquota and groupquota options are not currently used 139 * by UFS/FFS code and generally mount(8) does not pass those options 140 * from userland, but they can be passed by loader(8) via 141 * vfs.root.mountfrom.options. 142 */ 143 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr", 144 "noclusterw", "noexec", "export", "force", "from", "groupquota", 145 "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir", 146 "nosymfollow", "sync", "union", "userquota", NULL }; 147 148 static int 149 ffs_mount(struct mount *mp) 150 { 151 struct vnode *devvp; 152 struct thread *td; 153 struct ufsmount *ump = NULL; 154 struct fs *fs; 155 pid_t fsckpid = 0; 156 int error, error1, flags; 157 uint64_t mntorflags, saved_mnt_flag; 158 accmode_t accmode; 159 struct nameidata ndp; 160 char *fspec; 161 162 td = curthread; 163 if (vfs_filteropt(mp->mnt_optnew, ffs_opts)) 164 return (EINVAL); 165 if (uma_inode == NULL) { 166 uma_inode = uma_zcreate("FFS inode", 167 sizeof(struct inode), NULL, NULL, NULL, NULL, 168 UMA_ALIGN_PTR, 0); 169 uma_ufs1 = uma_zcreate("FFS1 dinode", 170 sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL, 171 UMA_ALIGN_PTR, 0); 172 uma_ufs2 = uma_zcreate("FFS2 dinode", 173 sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL, 174 UMA_ALIGN_PTR, 0); 175 } 176 177 vfs_deleteopt(mp->mnt_optnew, "groupquota"); 178 vfs_deleteopt(mp->mnt_optnew, "userquota"); 179 180 fspec = vfs_getopts(mp->mnt_optnew, "from", &error); 181 if (error) 182 return (error); 183 184 mntorflags = 0; 185 if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0) 186 mntorflags |= MNT_ACLS; 187 188 if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) { 189 mntorflags |= MNT_SNAPSHOT; 190 /* 191 * Once we have set the MNT_SNAPSHOT flag, do not 192 * persist "snapshot" in the options list. 193 */ 194 vfs_deleteopt(mp->mnt_optnew, "snapshot"); 195 vfs_deleteopt(mp->mnt_opt, "snapshot"); 196 } 197 198 if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 && 199 vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) { 200 /* 201 * Once we have set the restricted PID, do not 202 * persist "fsckpid" in the options list. 203 */ 204 vfs_deleteopt(mp->mnt_optnew, "fsckpid"); 205 vfs_deleteopt(mp->mnt_opt, "fsckpid"); 206 if (mp->mnt_flag & MNT_UPDATE) { 207 if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 && 208 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { 209 vfs_mount_error(mp, 210 "Checker enable: Must be read-only"); 211 return (EINVAL); 212 } 213 } else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { 214 vfs_mount_error(mp, 215 "Checker enable: Must be read-only"); 216 return (EINVAL); 217 } 218 /* Set to -1 if we are done */ 219 if (fsckpid == 0) 220 fsckpid = -1; 221 } 222 223 if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) { 224 if (mntorflags & MNT_ACLS) { 225 vfs_mount_error(mp, 226 "\"acls\" and \"nfsv4acls\" options " 227 "are mutually exclusive"); 228 return (EINVAL); 229 } 230 mntorflags |= MNT_NFS4ACLS; 231 } 232 233 MNT_ILOCK(mp); 234 mp->mnt_flag |= mntorflags; 235 MNT_IUNLOCK(mp); 236 /* 237 * If updating, check whether changing from read-only to 238 * read/write; if there is no device name, that's all we do. 239 */ 240 if (mp->mnt_flag & MNT_UPDATE) { 241 ump = VFSTOUFS(mp); 242 fs = ump->um_fs; 243 devvp = ump->um_devvp; 244 if (fsckpid == -1 && ump->um_fsckpid > 0) { 245 if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 || 246 (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) 247 return (error); 248 g_topology_lock(); 249 /* 250 * Return to normal read-only mode. 251 */ 252 error = g_access(ump->um_cp, 0, -1, 0); 253 g_topology_unlock(); 254 ump->um_fsckpid = 0; 255 } 256 if (fs->fs_ronly == 0 && 257 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 258 /* 259 * Flush any dirty data and suspend filesystem. 260 */ 261 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 262 return (error); 263 error = vfs_write_suspend_umnt(mp); 264 if (error != 0) 265 return (error); 266 /* 267 * Check for and optionally get rid of files open 268 * for writing. 269 */ 270 flags = WRITECLOSE; 271 if (mp->mnt_flag & MNT_FORCE) 272 flags |= FORCECLOSE; 273 if (MOUNTEDSOFTDEP(mp)) { 274 error = softdep_flushfiles(mp, flags, td); 275 } else { 276 error = ffs_flushfiles(mp, flags, td); 277 } 278 if (error) { 279 vfs_write_resume(mp, 0); 280 return (error); 281 } 282 if (fs->fs_pendingblocks != 0 || 283 fs->fs_pendinginodes != 0) { 284 printf("WARNING: %s Update error: blocks %jd " 285 "files %d\n", fs->fs_fsmnt, 286 (intmax_t)fs->fs_pendingblocks, 287 fs->fs_pendinginodes); 288 fs->fs_pendingblocks = 0; 289 fs->fs_pendinginodes = 0; 290 } 291 if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) 292 fs->fs_clean = 1; 293 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 294 fs->fs_ronly = 0; 295 fs->fs_clean = 0; 296 vfs_write_resume(mp, 0); 297 return (error); 298 } 299 if (MOUNTEDSOFTDEP(mp)) 300 softdep_unmount(mp); 301 g_topology_lock(); 302 /* 303 * Drop our write and exclusive access. 304 */ 305 g_access(ump->um_cp, 0, -1, -1); 306 g_topology_unlock(); 307 fs->fs_ronly = 1; 308 MNT_ILOCK(mp); 309 mp->mnt_flag |= MNT_RDONLY; 310 MNT_IUNLOCK(mp); 311 /* 312 * Allow the writers to note that filesystem 313 * is ro now. 314 */ 315 vfs_write_resume(mp, 0); 316 } 317 if ((mp->mnt_flag & MNT_RELOAD) && 318 (error = ffs_reload(mp, td, 0)) != 0) 319 return (error); 320 if (fs->fs_ronly && 321 !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 322 /* 323 * If we are running a checker, do not allow upgrade. 324 */ 325 if (ump->um_fsckpid > 0) { 326 vfs_mount_error(mp, 327 "Active checker, cannot upgrade to write"); 328 return (EINVAL); 329 } 330 /* 331 * If upgrade to read-write by non-root, then verify 332 * that user has necessary permissions on the device. 333 */ 334 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 335 error = VOP_ACCESS(devvp, VREAD | VWRITE, 336 td->td_ucred, td); 337 if (error) 338 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 339 if (error) { 340 VOP_UNLOCK(devvp, 0); 341 return (error); 342 } 343 VOP_UNLOCK(devvp, 0); 344 fs->fs_flags &= ~FS_UNCLEAN; 345 if (fs->fs_clean == 0) { 346 fs->fs_flags |= FS_UNCLEAN; 347 if ((mp->mnt_flag & MNT_FORCE) || 348 ((fs->fs_flags & 349 (FS_SUJ | FS_NEEDSFSCK)) == 0 && 350 (fs->fs_flags & FS_DOSOFTDEP))) { 351 printf("WARNING: %s was not properly " 352 "dismounted\n", fs->fs_fsmnt); 353 } else { 354 vfs_mount_error(mp, 355 "R/W mount of %s denied. %s.%s", 356 fs->fs_fsmnt, 357 "Filesystem is not clean - run fsck", 358 (fs->fs_flags & FS_SUJ) == 0 ? "" : 359 " Forced mount will invalidate" 360 " journal contents"); 361 return (EPERM); 362 } 363 } 364 g_topology_lock(); 365 /* 366 * Request exclusive write access. 367 */ 368 error = g_access(ump->um_cp, 0, 1, 1); 369 g_topology_unlock(); 370 if (error) 371 return (error); 372 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 373 return (error); 374 error = vfs_write_suspend_umnt(mp); 375 if (error != 0) 376 return (error); 377 fs->fs_ronly = 0; 378 MNT_ILOCK(mp); 379 saved_mnt_flag = MNT_RDONLY; 380 if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag & 381 MNT_ASYNC) != 0) 382 saved_mnt_flag |= MNT_ASYNC; 383 mp->mnt_flag &= ~saved_mnt_flag; 384 MNT_IUNLOCK(mp); 385 fs->fs_mtime = time_second; 386 /* check to see if we need to start softdep */ 387 if ((fs->fs_flags & FS_DOSOFTDEP) && 388 (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ 389 fs->fs_ronly = 1; 390 MNT_ILOCK(mp); 391 mp->mnt_flag |= saved_mnt_flag; 392 MNT_IUNLOCK(mp); 393 vfs_write_resume(mp, 0); 394 return (error); 395 } 396 fs->fs_clean = 0; 397 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 398 fs->fs_ronly = 1; 399 MNT_ILOCK(mp); 400 mp->mnt_flag |= saved_mnt_flag; 401 MNT_IUNLOCK(mp); 402 vfs_write_resume(mp, 0); 403 return (error); 404 } 405 if (fs->fs_snapinum[0] != 0) 406 ffs_snapshot_mount(mp); 407 vfs_write_resume(mp, 0); 408 } 409 /* 410 * Soft updates is incompatible with "async", 411 * so if we are doing softupdates stop the user 412 * from setting the async flag in an update. 413 * Softdep_mount() clears it in an initial mount 414 * or ro->rw remount. 415 */ 416 if (MOUNTEDSOFTDEP(mp)) { 417 /* XXX: Reset too late ? */ 418 MNT_ILOCK(mp); 419 mp->mnt_flag &= ~MNT_ASYNC; 420 MNT_IUNLOCK(mp); 421 } 422 /* 423 * Keep MNT_ACLS flag if it is stored in superblock. 424 */ 425 if ((fs->fs_flags & FS_ACLS) != 0) { 426 /* XXX: Set too late ? */ 427 MNT_ILOCK(mp); 428 mp->mnt_flag |= MNT_ACLS; 429 MNT_IUNLOCK(mp); 430 } 431 432 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 433 /* XXX: Set too late ? */ 434 MNT_ILOCK(mp); 435 mp->mnt_flag |= MNT_NFS4ACLS; 436 MNT_IUNLOCK(mp); 437 } 438 /* 439 * If this is a request from fsck to clean up the filesystem, 440 * then allow the specified pid to proceed. 441 */ 442 if (fsckpid > 0) { 443 if (ump->um_fsckpid != 0) { 444 vfs_mount_error(mp, 445 "Active checker already running on %s", 446 fs->fs_fsmnt); 447 return (EINVAL); 448 } 449 KASSERT(MOUNTEDSOFTDEP(mp) == 0, 450 ("soft updates enabled on read-only file system")); 451 g_topology_lock(); 452 /* 453 * Request write access. 454 */ 455 error = g_access(ump->um_cp, 0, 1, 0); 456 g_topology_unlock(); 457 if (error) { 458 vfs_mount_error(mp, 459 "Checker activation failed on %s", 460 fs->fs_fsmnt); 461 return (error); 462 } 463 ump->um_fsckpid = fsckpid; 464 if (fs->fs_snapinum[0] != 0) 465 ffs_snapshot_mount(mp); 466 fs->fs_mtime = time_second; 467 fs->fs_fmod = 1; 468 fs->fs_clean = 0; 469 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 470 } 471 472 /* 473 * If this is a snapshot request, take the snapshot. 474 */ 475 if (mp->mnt_flag & MNT_SNAPSHOT) 476 return (ffs_snapshot(mp, fspec)); 477 478 /* 479 * Must not call namei() while owning busy ref. 480 */ 481 vfs_unbusy(mp); 482 } 483 484 /* 485 * Not an update, or updating the name: look up the name 486 * and verify that it refers to a sensible disk device. 487 */ 488 NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); 489 error = namei(&ndp); 490 if ((mp->mnt_flag & MNT_UPDATE) != 0) { 491 /* 492 * Unmount does not start if MNT_UPDATE is set. Mount 493 * update busies mp before setting MNT_UPDATE. We 494 * must be able to retain our busy ref succesfully, 495 * without sleep. 496 */ 497 error1 = vfs_busy(mp, MBF_NOWAIT); 498 MPASS(error1 == 0); 499 } 500 if (error != 0) 501 return (error); 502 NDFREE(&ndp, NDF_ONLY_PNBUF); 503 devvp = ndp.ni_vp; 504 if (!vn_isdisk(devvp, &error)) { 505 vput(devvp); 506 return (error); 507 } 508 509 /* 510 * If mount by non-root, then verify that user has necessary 511 * permissions on the device. 512 */ 513 accmode = VREAD; 514 if ((mp->mnt_flag & MNT_RDONLY) == 0) 515 accmode |= VWRITE; 516 error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); 517 if (error) 518 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 519 if (error) { 520 vput(devvp); 521 return (error); 522 } 523 524 if (mp->mnt_flag & MNT_UPDATE) { 525 /* 526 * Update only 527 * 528 * If it's not the same vnode, or at least the same device 529 * then it's not correct. 530 */ 531 532 if (devvp->v_rdev != ump->um_devvp->v_rdev) 533 error = EINVAL; /* needs translation */ 534 vput(devvp); 535 if (error) 536 return (error); 537 } else { 538 /* 539 * New mount 540 * 541 * We need the name for the mount point (also used for 542 * "last mounted on") copied in. If an error occurs, 543 * the mount point is discarded by the upper level code. 544 * Note that vfs_mount_alloc() populates f_mntonname for us. 545 */ 546 if ((error = ffs_mountfs(devvp, mp, td)) != 0) { 547 vrele(devvp); 548 return (error); 549 } 550 if (fsckpid > 0) { 551 KASSERT(MOUNTEDSOFTDEP(mp) == 0, 552 ("soft updates enabled on read-only file system")); 553 ump = VFSTOUFS(mp); 554 fs = ump->um_fs; 555 g_topology_lock(); 556 /* 557 * Request write access. 558 */ 559 error = g_access(ump->um_cp, 0, 1, 0); 560 g_topology_unlock(); 561 if (error) { 562 printf("WARNING: %s: Checker activation " 563 "failed\n", fs->fs_fsmnt); 564 } else { 565 ump->um_fsckpid = fsckpid; 566 if (fs->fs_snapinum[0] != 0) 567 ffs_snapshot_mount(mp); 568 fs->fs_mtime = time_second; 569 fs->fs_clean = 0; 570 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 571 } 572 } 573 } 574 vfs_mountedfrom(mp, fspec); 575 return (0); 576 } 577 578 /* 579 * Compatibility with old mount system call. 580 */ 581 582 static int 583 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) 584 { 585 struct ufs_args args; 586 struct export_args exp; 587 int error; 588 589 if (data == NULL) 590 return (EINVAL); 591 error = copyin(data, &args, sizeof args); 592 if (error) 593 return (error); 594 vfs_oexport_conv(&args.export, &exp); 595 596 ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); 597 ma = mount_arg(ma, "export", &exp, sizeof(exp)); 598 error = kernel_mount(ma, flags); 599 600 return (error); 601 } 602 603 /* 604 * Reload all incore data for a filesystem (used after running fsck on 605 * the root filesystem and finding things to fix). If the 'force' flag 606 * is 0, the filesystem must be mounted read-only. 607 * 608 * Things to do to update the mount: 609 * 1) invalidate all cached meta-data. 610 * 2) re-read superblock from disk. 611 * 3) re-read summary information from disk. 612 * 4) invalidate all inactive vnodes. 613 * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary 614 * writers, if requested. 615 * 6) invalidate all cached file data. 616 * 7) re-read inode data for all active vnodes. 617 */ 618 int 619 ffs_reload(struct mount *mp, struct thread *td, int flags) 620 { 621 struct vnode *vp, *mvp, *devvp; 622 struct inode *ip; 623 void *space; 624 struct buf *bp; 625 struct fs *fs, *newfs; 626 struct ufsmount *ump; 627 ufs2_daddr_t sblockloc; 628 int i, blks, error; 629 u_long size; 630 int32_t *lp; 631 632 ump = VFSTOUFS(mp); 633 634 MNT_ILOCK(mp); 635 if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) { 636 MNT_IUNLOCK(mp); 637 return (EINVAL); 638 } 639 MNT_IUNLOCK(mp); 640 641 /* 642 * Step 1: invalidate all cached meta-data. 643 */ 644 devvp = VFSTOUFS(mp)->um_devvp; 645 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 646 if (vinvalbuf(devvp, 0, 0, 0) != 0) 647 panic("ffs_reload: dirty1"); 648 VOP_UNLOCK(devvp, 0); 649 650 /* 651 * Step 2: re-read superblock from disk. 652 */ 653 fs = VFSTOUFS(mp)->um_fs; 654 if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, 655 NOCRED, &bp)) != 0) 656 return (error); 657 newfs = (struct fs *)bp->b_data; 658 if ((newfs->fs_magic != FS_UFS1_MAGIC && 659 newfs->fs_magic != FS_UFS2_MAGIC) || 660 newfs->fs_bsize > MAXBSIZE || 661 newfs->fs_bsize < sizeof(struct fs)) { 662 brelse(bp); 663 return (EIO); /* XXX needs translation */ 664 } 665 /* 666 * Copy pointer fields back into superblock before copying in XXX 667 * new superblock. These should really be in the ufsmount. XXX 668 * Note that important parameters (eg fs_ncg) are unchanged. 669 */ 670 newfs->fs_csp = fs->fs_csp; 671 newfs->fs_maxcluster = fs->fs_maxcluster; 672 newfs->fs_contigdirs = fs->fs_contigdirs; 673 newfs->fs_active = fs->fs_active; 674 newfs->fs_ronly = fs->fs_ronly; 675 sblockloc = fs->fs_sblockloc; 676 bcopy(newfs, fs, (u_int)fs->fs_sbsize); 677 brelse(bp); 678 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 679 ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); 680 UFS_LOCK(ump); 681 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 682 printf("WARNING: %s: reload pending error: blocks %jd " 683 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 684 fs->fs_pendinginodes); 685 fs->fs_pendingblocks = 0; 686 fs->fs_pendinginodes = 0; 687 } 688 UFS_UNLOCK(ump); 689 690 /* 691 * Step 3: re-read summary information from disk. 692 */ 693 size = fs->fs_cssize; 694 blks = howmany(size, fs->fs_fsize); 695 if (fs->fs_contigsumsize > 0) 696 size += fs->fs_ncg * sizeof(int32_t); 697 size += fs->fs_ncg * sizeof(u_int8_t); 698 free(fs->fs_csp, M_UFSMNT); 699 space = malloc(size, M_UFSMNT, M_WAITOK); 700 fs->fs_csp = space; 701 for (i = 0; i < blks; i += fs->fs_frag) { 702 size = fs->fs_bsize; 703 if (i + fs->fs_frag > blks) 704 size = (blks - i) * fs->fs_fsize; 705 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 706 NOCRED, &bp); 707 if (error) 708 return (error); 709 bcopy(bp->b_data, space, (u_int)size); 710 space = (char *)space + size; 711 brelse(bp); 712 } 713 /* 714 * We no longer know anything about clusters per cylinder group. 715 */ 716 if (fs->fs_contigsumsize > 0) { 717 fs->fs_maxcluster = lp = space; 718 for (i = 0; i < fs->fs_ncg; i++) 719 *lp++ = fs->fs_contigsumsize; 720 space = lp; 721 } 722 size = fs->fs_ncg * sizeof(u_int8_t); 723 fs->fs_contigdirs = (u_int8_t *)space; 724 bzero(fs->fs_contigdirs, size); 725 if ((flags & FFSR_UNSUSPEND) != 0) { 726 MNT_ILOCK(mp); 727 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); 728 wakeup(&mp->mnt_flag); 729 MNT_IUNLOCK(mp); 730 } 731 732 loop: 733 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 734 /* 735 * Skip syncer vnode. 736 */ 737 if (vp->v_type == VNON) { 738 VI_UNLOCK(vp); 739 continue; 740 } 741 /* 742 * Step 4: invalidate all cached file data. 743 */ 744 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { 745 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 746 goto loop; 747 } 748 if (vinvalbuf(vp, 0, 0, 0)) 749 panic("ffs_reload: dirty2"); 750 /* 751 * Step 5: re-read inode data for all active vnodes. 752 */ 753 ip = VTOI(vp); 754 error = 755 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 756 (int)fs->fs_bsize, NOCRED, &bp); 757 if (error) { 758 vput(vp); 759 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 760 return (error); 761 } 762 if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) { 763 brelse(bp); 764 vput(vp); 765 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 766 return (error); 767 } 768 ip->i_effnlink = ip->i_nlink; 769 brelse(bp); 770 vput(vp); 771 } 772 return (0); 773 } 774 775 /* 776 * Common code for mount and mountroot 777 */ 778 static int 779 ffs_mountfs(devvp, mp, td) 780 struct vnode *devvp; 781 struct mount *mp; 782 struct thread *td; 783 { 784 struct ufsmount *ump; 785 struct fs *fs; 786 struct cdev *dev; 787 int error, i, len, ronly; 788 struct ucred *cred; 789 struct g_consumer *cp; 790 struct mount *nmp; 791 int candelete; 792 off_t loc; 793 794 fs = NULL; 795 ump = NULL; 796 cred = td ? td->td_ucred : NOCRED; 797 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 798 799 KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); 800 dev = devvp->v_rdev; 801 if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, 802 (uintptr_t)mp) == 0) { 803 VOP_UNLOCK(devvp, 0); 804 return (EBUSY); 805 } 806 g_topology_lock(); 807 error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1); 808 g_topology_unlock(); 809 if (error != 0) { 810 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 811 VOP_UNLOCK(devvp, 0); 812 return (error); 813 } 814 dev_ref(dev); 815 devvp->v_bufobj.bo_ops = &ffs_ops; 816 VOP_UNLOCK(devvp, 0); 817 if (dev->si_iosize_max != 0) 818 mp->mnt_iosize_max = dev->si_iosize_max; 819 if (mp->mnt_iosize_max > MAXPHYS) 820 mp->mnt_iosize_max = MAXPHYS; 821 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { 822 error = EINVAL; 823 vfs_mount_error(mp, 824 "Invalid sectorsize %d for superblock size %d", 825 cp->provider->sectorsize, SBLOCKSIZE); 826 goto out; 827 } 828 /* fetch the superblock and summary information */ 829 loc = STDSB; 830 if ((mp->mnt_flag & MNT_ROOTFS) != 0) 831 loc = STDSB_NOHASHFAIL; 832 if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0) 833 goto out; 834 /* none of these types of check-hashes are maintained by this kernel */ 835 fs->fs_metackhash &= ~(CK_INDIR | CK_DIR); 836 /* no support for any undefined flags */ 837 fs->fs_flags &= FS_SUPPORTED; 838 fs->fs_flags &= ~FS_UNCLEAN; 839 if (fs->fs_clean == 0) { 840 fs->fs_flags |= FS_UNCLEAN; 841 if (ronly || (mp->mnt_flag & MNT_FORCE) || 842 ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && 843 (fs->fs_flags & FS_DOSOFTDEP))) { 844 printf("WARNING: %s was not properly dismounted\n", 845 fs->fs_fsmnt); 846 } else { 847 vfs_mount_error(mp, "R/W mount of %s denied. %s%s", 848 fs->fs_fsmnt, "Filesystem is not clean - run fsck.", 849 (fs->fs_flags & FS_SUJ) == 0 ? "" : 850 " Forced mount will invalidate journal contents"); 851 error = EPERM; 852 goto out; 853 } 854 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 855 (mp->mnt_flag & MNT_FORCE)) { 856 printf("WARNING: %s: lost blocks %jd files %d\n", 857 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 858 fs->fs_pendinginodes); 859 fs->fs_pendingblocks = 0; 860 fs->fs_pendinginodes = 0; 861 } 862 } 863 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 864 printf("WARNING: %s: mount pending error: blocks %jd " 865 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 866 fs->fs_pendinginodes); 867 fs->fs_pendingblocks = 0; 868 fs->fs_pendinginodes = 0; 869 } 870 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 871 #ifdef UFS_GJOURNAL 872 /* 873 * Get journal provider name. 874 */ 875 len = 1024; 876 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 877 if (g_io_getattr("GJOURNAL::provider", cp, &len, 878 mp->mnt_gjprovider) == 0) { 879 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 880 M_UFSMNT, M_WAITOK); 881 MNT_ILOCK(mp); 882 mp->mnt_flag |= MNT_GJOURNAL; 883 MNT_IUNLOCK(mp); 884 } else { 885 printf("WARNING: %s: GJOURNAL flag on fs " 886 "but no gjournal provider below\n", 887 mp->mnt_stat.f_mntonname); 888 free(mp->mnt_gjprovider, M_UFSMNT); 889 mp->mnt_gjprovider = NULL; 890 } 891 #else 892 printf("WARNING: %s: GJOURNAL flag on fs but no " 893 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 894 #endif 895 } else { 896 mp->mnt_gjprovider = NULL; 897 } 898 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 899 ump->um_cp = cp; 900 ump->um_bo = &devvp->v_bufobj; 901 ump->um_fs = fs; 902 if (fs->fs_magic == FS_UFS1_MAGIC) { 903 ump->um_fstype = UFS1; 904 ump->um_balloc = ffs_balloc_ufs1; 905 } else { 906 ump->um_fstype = UFS2; 907 ump->um_balloc = ffs_balloc_ufs2; 908 } 909 ump->um_blkatoff = ffs_blkatoff; 910 ump->um_truncate = ffs_truncate; 911 ump->um_update = ffs_update; 912 ump->um_valloc = ffs_valloc; 913 ump->um_vfree = ffs_vfree; 914 ump->um_ifree = ffs_ifree; 915 ump->um_rdonly = ffs_rdonly; 916 ump->um_snapgone = ffs_snapgone; 917 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 918 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); 919 fs->fs_ronly = ronly; 920 fs->fs_active = NULL; 921 mp->mnt_data = ump; 922 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 923 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 924 nmp = NULL; 925 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 926 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 927 if (nmp) 928 vfs_rel(nmp); 929 vfs_getnewfsid(mp); 930 } 931 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 932 MNT_ILOCK(mp); 933 mp->mnt_flag |= MNT_LOCAL; 934 MNT_IUNLOCK(mp); 935 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 936 #ifdef MAC 937 MNT_ILOCK(mp); 938 mp->mnt_flag |= MNT_MULTILABEL; 939 MNT_IUNLOCK(mp); 940 #else 941 printf("WARNING: %s: multilabel flag on fs but " 942 "no MAC support\n", mp->mnt_stat.f_mntonname); 943 #endif 944 } 945 if ((fs->fs_flags & FS_ACLS) != 0) { 946 #ifdef UFS_ACL 947 MNT_ILOCK(mp); 948 949 if (mp->mnt_flag & MNT_NFS4ACLS) 950 printf("WARNING: %s: ACLs flag on fs conflicts with " 951 "\"nfsv4acls\" mount option; option ignored\n", 952 mp->mnt_stat.f_mntonname); 953 mp->mnt_flag &= ~MNT_NFS4ACLS; 954 mp->mnt_flag |= MNT_ACLS; 955 956 MNT_IUNLOCK(mp); 957 #else 958 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 959 mp->mnt_stat.f_mntonname); 960 #endif 961 } 962 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 963 #ifdef UFS_ACL 964 MNT_ILOCK(mp); 965 966 if (mp->mnt_flag & MNT_ACLS) 967 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 968 "with \"acls\" mount option; option ignored\n", 969 mp->mnt_stat.f_mntonname); 970 mp->mnt_flag &= ~MNT_ACLS; 971 mp->mnt_flag |= MNT_NFS4ACLS; 972 973 MNT_IUNLOCK(mp); 974 #else 975 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 976 "ACLs support\n", mp->mnt_stat.f_mntonname); 977 #endif 978 } 979 if ((fs->fs_flags & FS_TRIM) != 0) { 980 len = sizeof(int); 981 if (g_io_getattr("GEOM::candelete", cp, &len, 982 &candelete) == 0) { 983 if (candelete) 984 ump->um_flags |= UM_CANDELETE; 985 else 986 printf("WARNING: %s: TRIM flag on fs but disk " 987 "does not support TRIM\n", 988 mp->mnt_stat.f_mntonname); 989 } else { 990 printf("WARNING: %s: TRIM flag on fs but disk does " 991 "not confirm that it supports TRIM\n", 992 mp->mnt_stat.f_mntonname); 993 } 994 if (((ump->um_flags) & UM_CANDELETE) != 0) { 995 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 996 taskqueue_thread_enqueue, &ump->um_trim_tq); 997 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 998 "%s trim", mp->mnt_stat.f_mntonname); 999 ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM, 1000 &ump->um_trimlisthashsize); 1001 } 1002 } 1003 1004 ump->um_mountp = mp; 1005 ump->um_dev = dev; 1006 ump->um_devvp = devvp; 1007 ump->um_nindir = fs->fs_nindir; 1008 ump->um_bptrtodb = fs->fs_fsbtodb; 1009 ump->um_seqinc = fs->fs_frag; 1010 for (i = 0; i < MAXQUOTAS; i++) 1011 ump->um_quotas[i] = NULLVP; 1012 #ifdef UFS_EXTATTR 1013 ufs_extattr_uepm_init(&ump->um_extattr); 1014 #endif 1015 /* 1016 * Set FS local "last mounted on" information (NULL pad) 1017 */ 1018 bzero(fs->fs_fsmnt, MAXMNTLEN); 1019 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1020 mp->mnt_stat.f_iosize = fs->fs_bsize; 1021 1022 if (mp->mnt_flag & MNT_ROOTFS) { 1023 /* 1024 * Root mount; update timestamp in mount structure. 1025 * this will be used by the common root mount code 1026 * to update the system clock. 1027 */ 1028 mp->mnt_time = fs->fs_time; 1029 } 1030 1031 if (ronly == 0) { 1032 fs->fs_mtime = time_second; 1033 if ((fs->fs_flags & FS_DOSOFTDEP) && 1034 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1035 ffs_flushfiles(mp, FORCECLOSE, td); 1036 goto out; 1037 } 1038 if (fs->fs_snapinum[0] != 0) 1039 ffs_snapshot_mount(mp); 1040 fs->fs_fmod = 1; 1041 fs->fs_clean = 0; 1042 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1043 } 1044 /* 1045 * Initialize filesystem state information in mount struct. 1046 */ 1047 MNT_ILOCK(mp); 1048 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1049 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1050 MNT_IUNLOCK(mp); 1051 #ifdef UFS_EXTATTR 1052 #ifdef UFS_EXTATTR_AUTOSTART 1053 /* 1054 * 1055 * Auto-starting does the following: 1056 * - check for /.attribute in the fs, and extattr_start if so 1057 * - for each file in .attribute, enable that file with 1058 * an attribute of the same name. 1059 * Not clear how to report errors -- probably eat them. 1060 * This would all happen while the filesystem was busy/not 1061 * available, so would effectively be "atomic". 1062 */ 1063 (void) ufs_extattr_autostart(mp, td); 1064 #endif /* !UFS_EXTATTR_AUTOSTART */ 1065 #endif /* !UFS_EXTATTR */ 1066 return (0); 1067 out: 1068 if (fs != NULL) { 1069 free(fs->fs_csp, M_UFSMNT); 1070 free(fs, M_UFSMNT); 1071 } 1072 if (cp != NULL) { 1073 g_topology_lock(); 1074 g_vfs_close(cp); 1075 g_topology_unlock(); 1076 } 1077 if (ump) { 1078 mtx_destroy(UFS_MTX(ump)); 1079 if (mp->mnt_gjprovider != NULL) { 1080 free(mp->mnt_gjprovider, M_UFSMNT); 1081 mp->mnt_gjprovider = NULL; 1082 } 1083 free(ump, M_UFSMNT); 1084 mp->mnt_data = NULL; 1085 } 1086 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 1087 dev_rel(dev); 1088 return (error); 1089 } 1090 1091 /* 1092 * A read function for use by filesystem-layer routines. 1093 */ 1094 static int 1095 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size) 1096 { 1097 struct buf *bp; 1098 int error; 1099 1100 KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp)); 1101 *bufp = malloc(size, M_UFSMNT, M_WAITOK); 1102 if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED, 1103 &bp)) != 0) 1104 return (error); 1105 bcopy(bp->b_data, *bufp, size); 1106 bp->b_flags |= B_INVAL | B_NOCACHE; 1107 brelse(bp); 1108 return (0); 1109 } 1110 1111 #include <sys/sysctl.h> 1112 static int bigcgs = 0; 1113 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); 1114 1115 /* 1116 * Sanity checks for loading old filesystem superblocks. 1117 * See ffs_oldfscompat_write below for unwound actions. 1118 * 1119 * XXX - Parts get retired eventually. 1120 * Unfortunately new bits get added. 1121 */ 1122 static void 1123 ffs_oldfscompat_read(fs, ump, sblockloc) 1124 struct fs *fs; 1125 struct ufsmount *ump; 1126 ufs2_daddr_t sblockloc; 1127 { 1128 off_t maxfilesize; 1129 1130 /* 1131 * If not yet done, update fs_flags location and value of fs_sblockloc. 1132 */ 1133 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1134 fs->fs_flags = fs->fs_old_flags; 1135 fs->fs_old_flags |= FS_FLAGS_UPDATED; 1136 fs->fs_sblockloc = sblockloc; 1137 } 1138 /* 1139 * If not yet done, update UFS1 superblock with new wider fields. 1140 */ 1141 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { 1142 fs->fs_maxbsize = fs->fs_bsize; 1143 fs->fs_time = fs->fs_old_time; 1144 fs->fs_size = fs->fs_old_size; 1145 fs->fs_dsize = fs->fs_old_dsize; 1146 fs->fs_csaddr = fs->fs_old_csaddr; 1147 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; 1148 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; 1149 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; 1150 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; 1151 } 1152 if (fs->fs_magic == FS_UFS1_MAGIC && 1153 fs->fs_old_inodefmt < FS_44INODEFMT) { 1154 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; 1155 fs->fs_qbmask = ~fs->fs_bmask; 1156 fs->fs_qfmask = ~fs->fs_fmask; 1157 } 1158 if (fs->fs_magic == FS_UFS1_MAGIC) { 1159 ump->um_savedmaxfilesize = fs->fs_maxfilesize; 1160 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; 1161 if (fs->fs_maxfilesize > maxfilesize) 1162 fs->fs_maxfilesize = maxfilesize; 1163 } 1164 /* Compatibility for old filesystems */ 1165 if (fs->fs_avgfilesize <= 0) 1166 fs->fs_avgfilesize = AVFILESIZ; 1167 if (fs->fs_avgfpdir <= 0) 1168 fs->fs_avgfpdir = AFPDIR; 1169 if (bigcgs) { 1170 fs->fs_save_cgsize = fs->fs_cgsize; 1171 fs->fs_cgsize = fs->fs_bsize; 1172 } 1173 } 1174 1175 /* 1176 * Unwinding superblock updates for old filesystems. 1177 * See ffs_oldfscompat_read above for details. 1178 * 1179 * XXX - Parts get retired eventually. 1180 * Unfortunately new bits get added. 1181 */ 1182 void 1183 ffs_oldfscompat_write(fs, ump) 1184 struct fs *fs; 1185 struct ufsmount *ump; 1186 { 1187 1188 /* 1189 * Copy back UFS2 updated fields that UFS1 inspects. 1190 */ 1191 if (fs->fs_magic == FS_UFS1_MAGIC) { 1192 fs->fs_old_time = fs->fs_time; 1193 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; 1194 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; 1195 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; 1196 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; 1197 fs->fs_maxfilesize = ump->um_savedmaxfilesize; 1198 } 1199 if (bigcgs) { 1200 fs->fs_cgsize = fs->fs_save_cgsize; 1201 fs->fs_save_cgsize = 0; 1202 } 1203 } 1204 1205 /* 1206 * unmount system call 1207 */ 1208 static int 1209 ffs_unmount(mp, mntflags) 1210 struct mount *mp; 1211 int mntflags; 1212 { 1213 struct thread *td; 1214 struct ufsmount *ump = VFSTOUFS(mp); 1215 struct fs *fs; 1216 int error, flags, susp; 1217 #ifdef UFS_EXTATTR 1218 int e_restart; 1219 #endif 1220 1221 flags = 0; 1222 td = curthread; 1223 fs = ump->um_fs; 1224 susp = 0; 1225 if (mntflags & MNT_FORCE) { 1226 flags |= FORCECLOSE; 1227 susp = fs->fs_ronly == 0; 1228 } 1229 #ifdef UFS_EXTATTR 1230 if ((error = ufs_extattr_stop(mp, td))) { 1231 if (error != EOPNOTSUPP) 1232 printf("WARNING: unmount %s: ufs_extattr_stop " 1233 "returned errno %d\n", mp->mnt_stat.f_mntonname, 1234 error); 1235 e_restart = 0; 1236 } else { 1237 ufs_extattr_uepm_destroy(&ump->um_extattr); 1238 e_restart = 1; 1239 } 1240 #endif 1241 if (susp) { 1242 error = vfs_write_suspend_umnt(mp); 1243 if (error != 0) 1244 goto fail1; 1245 } 1246 if (MOUNTEDSOFTDEP(mp)) 1247 error = softdep_flushfiles(mp, flags, td); 1248 else 1249 error = ffs_flushfiles(mp, flags, td); 1250 if (error != 0 && error != ENXIO) 1251 goto fail; 1252 1253 UFS_LOCK(ump); 1254 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1255 printf("WARNING: unmount %s: pending error: blocks %jd " 1256 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1257 fs->fs_pendinginodes); 1258 fs->fs_pendingblocks = 0; 1259 fs->fs_pendinginodes = 0; 1260 } 1261 UFS_UNLOCK(ump); 1262 if (MOUNTEDSOFTDEP(mp)) 1263 softdep_unmount(mp); 1264 if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) { 1265 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; 1266 error = ffs_sbupdate(ump, MNT_WAIT, 0); 1267 if (error && error != ENXIO) { 1268 fs->fs_clean = 0; 1269 goto fail; 1270 } 1271 } 1272 if (susp) 1273 vfs_write_resume(mp, VR_START_WRITE); 1274 if (ump->um_trim_tq != NULL) { 1275 while (ump->um_trim_inflight != 0) 1276 pause("ufsutr", hz); 1277 taskqueue_drain_all(ump->um_trim_tq); 1278 taskqueue_free(ump->um_trim_tq); 1279 free (ump->um_trimhash, M_TRIM); 1280 } 1281 g_topology_lock(); 1282 if (ump->um_fsckpid > 0) { 1283 /* 1284 * Return to normal read-only mode. 1285 */ 1286 error = g_access(ump->um_cp, 0, -1, 0); 1287 ump->um_fsckpid = 0; 1288 } 1289 g_vfs_close(ump->um_cp); 1290 g_topology_unlock(); 1291 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1292 vrele(ump->um_devvp); 1293 dev_rel(ump->um_dev); 1294 mtx_destroy(UFS_MTX(ump)); 1295 if (mp->mnt_gjprovider != NULL) { 1296 free(mp->mnt_gjprovider, M_UFSMNT); 1297 mp->mnt_gjprovider = NULL; 1298 } 1299 free(fs->fs_csp, M_UFSMNT); 1300 free(fs, M_UFSMNT); 1301 free(ump, M_UFSMNT); 1302 mp->mnt_data = NULL; 1303 MNT_ILOCK(mp); 1304 mp->mnt_flag &= ~MNT_LOCAL; 1305 MNT_IUNLOCK(mp); 1306 if (td->td_su == mp) { 1307 td->td_su = NULL; 1308 vfs_rel(mp); 1309 } 1310 return (error); 1311 1312 fail: 1313 if (susp) 1314 vfs_write_resume(mp, VR_START_WRITE); 1315 fail1: 1316 #ifdef UFS_EXTATTR 1317 if (e_restart) { 1318 ufs_extattr_uepm_init(&ump->um_extattr); 1319 #ifdef UFS_EXTATTR_AUTOSTART 1320 (void) ufs_extattr_autostart(mp, td); 1321 #endif 1322 } 1323 #endif 1324 1325 return (error); 1326 } 1327 1328 /* 1329 * Flush out all the files in a filesystem. 1330 */ 1331 int 1332 ffs_flushfiles(mp, flags, td) 1333 struct mount *mp; 1334 int flags; 1335 struct thread *td; 1336 { 1337 struct ufsmount *ump; 1338 int qerror, error; 1339 1340 ump = VFSTOUFS(mp); 1341 qerror = 0; 1342 #ifdef QUOTA 1343 if (mp->mnt_flag & MNT_QUOTA) { 1344 int i; 1345 error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1346 if (error) 1347 return (error); 1348 for (i = 0; i < MAXQUOTAS; i++) { 1349 error = quotaoff(td, mp, i); 1350 if (error != 0) { 1351 if ((flags & EARLYFLUSH) == 0) 1352 return (error); 1353 else 1354 qerror = error; 1355 } 1356 } 1357 1358 /* 1359 * Here we fall through to vflush again to ensure that 1360 * we have gotten rid of all the system vnodes, unless 1361 * quotas must not be closed. 1362 */ 1363 } 1364 #endif 1365 ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles"); 1366 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1367 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1368 return (error); 1369 ffs_snapshot_unmount(mp); 1370 flags |= FORCECLOSE; 1371 /* 1372 * Here we fall through to vflush again to ensure 1373 * that we have gotten rid of all the system vnodes. 1374 */ 1375 } 1376 1377 /* 1378 * Do not close system files if quotas were not closed, to be 1379 * able to sync the remaining dquots. The freeblks softupdate 1380 * workitems might hold a reference on a dquot, preventing 1381 * quotaoff() from completing. Next round of 1382 * softdep_flushworklist() iteration should process the 1383 * blockers, allowing the next run of quotaoff() to finally 1384 * flush held dquots. 1385 * 1386 * Otherwise, flush all the files. 1387 */ 1388 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1389 return (error); 1390 1391 /* 1392 * Flush filesystem metadata. 1393 */ 1394 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1395 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1396 VOP_UNLOCK(ump->um_devvp, 0); 1397 return (error); 1398 } 1399 1400 /* 1401 * Get filesystem statistics. 1402 */ 1403 static int 1404 ffs_statfs(mp, sbp) 1405 struct mount *mp; 1406 struct statfs *sbp; 1407 { 1408 struct ufsmount *ump; 1409 struct fs *fs; 1410 1411 ump = VFSTOUFS(mp); 1412 fs = ump->um_fs; 1413 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1414 panic("ffs_statfs"); 1415 sbp->f_version = STATFS_VERSION; 1416 sbp->f_bsize = fs->fs_fsize; 1417 sbp->f_iosize = fs->fs_bsize; 1418 sbp->f_blocks = fs->fs_dsize; 1419 UFS_LOCK(ump); 1420 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 1421 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 1422 sbp->f_bavail = freespace(fs, fs->fs_minfree) + 1423 dbtofsb(fs, fs->fs_pendingblocks); 1424 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; 1425 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 1426 UFS_UNLOCK(ump); 1427 sbp->f_namemax = UFS_MAXNAMLEN; 1428 return (0); 1429 } 1430 1431 static bool 1432 sync_doupdate(struct inode *ip) 1433 { 1434 1435 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 1436 IN_UPDATE)) != 0); 1437 } 1438 1439 /* 1440 * For a lazy sync, we only care about access times, quotas and the 1441 * superblock. Other filesystem changes are already converted to 1442 * cylinder group blocks or inode blocks updates and are written to 1443 * disk by syncer. 1444 */ 1445 static int 1446 ffs_sync_lazy(mp) 1447 struct mount *mp; 1448 { 1449 struct vnode *mvp, *vp; 1450 struct inode *ip; 1451 struct thread *td; 1452 int allerror, error; 1453 1454 allerror = 0; 1455 td = curthread; 1456 if ((mp->mnt_flag & MNT_NOATIME) != 0) 1457 goto qupdate; 1458 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 1459 if (vp->v_type == VNON) { 1460 VI_UNLOCK(vp); 1461 continue; 1462 } 1463 ip = VTOI(vp); 1464 1465 /* 1466 * The IN_ACCESS flag is converted to IN_MODIFIED by 1467 * ufs_close() and ufs_getattr() by the calls to 1468 * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1469 * Test also all the other timestamp flags too, to pick up 1470 * any other cases that could be missed. 1471 */ 1472 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1473 VI_UNLOCK(vp); 1474 continue; 1475 } 1476 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, 1477 td)) != 0) 1478 continue; 1479 if (sync_doupdate(ip)) 1480 error = ffs_update(vp, 0); 1481 if (error != 0) 1482 allerror = error; 1483 vput(vp); 1484 } 1485 1486 qupdate: 1487 #ifdef QUOTA 1488 qsync(mp); 1489 #endif 1490 1491 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1492 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1493 allerror = error; 1494 return (allerror); 1495 } 1496 1497 /* 1498 * Go through the disk queues to initiate sandbagged IO; 1499 * go through the inodes to write those that have been modified; 1500 * initiate the writing of the super block if it has been modified. 1501 * 1502 * Note: we are always called with the filesystem marked busy using 1503 * vfs_busy(). 1504 */ 1505 static int 1506 ffs_sync(mp, waitfor) 1507 struct mount *mp; 1508 int waitfor; 1509 { 1510 struct vnode *mvp, *vp, *devvp; 1511 struct thread *td; 1512 struct inode *ip; 1513 struct ufsmount *ump = VFSTOUFS(mp); 1514 struct fs *fs; 1515 int error, count, lockreq, allerror = 0; 1516 int suspend; 1517 int suspended; 1518 int secondary_writes; 1519 int secondary_accwrites; 1520 int softdep_deps; 1521 int softdep_accdeps; 1522 struct bufobj *bo; 1523 1524 suspend = 0; 1525 suspended = 0; 1526 td = curthread; 1527 fs = ump->um_fs; 1528 if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0) 1529 panic("%s: ffs_sync: modification on read-only filesystem", 1530 fs->fs_fsmnt); 1531 if (waitfor == MNT_LAZY) { 1532 if (!rebooting) 1533 return (ffs_sync_lazy(mp)); 1534 waitfor = MNT_NOWAIT; 1535 } 1536 1537 /* 1538 * Write back each (modified) inode. 1539 */ 1540 lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1541 if (waitfor == MNT_SUSPEND) { 1542 suspend = 1; 1543 waitfor = MNT_WAIT; 1544 } 1545 if (waitfor == MNT_WAIT) 1546 lockreq = LK_EXCLUSIVE; 1547 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; 1548 loop: 1549 /* Grab snapshot of secondary write counts */ 1550 MNT_ILOCK(mp); 1551 secondary_writes = mp->mnt_secondary_writes; 1552 secondary_accwrites = mp->mnt_secondary_accwrites; 1553 MNT_IUNLOCK(mp); 1554 1555 /* Grab snapshot of softdep dependency counts */ 1556 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1557 1558 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1559 /* 1560 * Depend on the vnode interlock to keep things stable enough 1561 * for a quick test. Since there might be hundreds of 1562 * thousands of vnodes, we cannot afford even a subroutine 1563 * call unless there's a good chance that we have work to do. 1564 */ 1565 if (vp->v_type == VNON) { 1566 VI_UNLOCK(vp); 1567 continue; 1568 } 1569 ip = VTOI(vp); 1570 if ((ip->i_flag & 1571 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 1572 vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1573 VI_UNLOCK(vp); 1574 continue; 1575 } 1576 if ((error = vget(vp, lockreq, td)) != 0) { 1577 if (error == ENOENT || error == ENOLCK) { 1578 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1579 goto loop; 1580 } 1581 continue; 1582 } 1583 if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0) 1584 allerror = error; 1585 vput(vp); 1586 } 1587 /* 1588 * Force stale filesystem control information to be flushed. 1589 */ 1590 if (waitfor == MNT_WAIT || rebooting) { 1591 if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 1592 allerror = error; 1593 /* Flushed work items may create new vnodes to clean */ 1594 if (allerror == 0 && count) 1595 goto loop; 1596 } 1597 #ifdef QUOTA 1598 qsync(mp); 1599 #endif 1600 1601 devvp = ump->um_devvp; 1602 bo = &devvp->v_bufobj; 1603 BO_LOCK(bo); 1604 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1605 BO_UNLOCK(bo); 1606 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1607 error = VOP_FSYNC(devvp, waitfor, td); 1608 VOP_UNLOCK(devvp, 0); 1609 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 1610 error = ffs_sbupdate(ump, waitfor, 0); 1611 if (error != 0) 1612 allerror = error; 1613 if (allerror == 0 && waitfor == MNT_WAIT) 1614 goto loop; 1615 } else if (suspend != 0) { 1616 if (softdep_check_suspend(mp, 1617 devvp, 1618 softdep_deps, 1619 softdep_accdeps, 1620 secondary_writes, 1621 secondary_accwrites) != 0) { 1622 MNT_IUNLOCK(mp); 1623 goto loop; /* More work needed */ 1624 } 1625 mtx_assert(MNT_MTX(mp), MA_OWNED); 1626 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1627 MNT_IUNLOCK(mp); 1628 suspended = 1; 1629 } else 1630 BO_UNLOCK(bo); 1631 /* 1632 * Write back modified superblock. 1633 */ 1634 if (fs->fs_fmod != 0 && 1635 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1636 allerror = error; 1637 return (allerror); 1638 } 1639 1640 int 1641 ffs_vget(mp, ino, flags, vpp) 1642 struct mount *mp; 1643 ino_t ino; 1644 int flags; 1645 struct vnode **vpp; 1646 { 1647 return (ffs_vgetf(mp, ino, flags, vpp, 0)); 1648 } 1649 1650 int 1651 ffs_vgetf(mp, ino, flags, vpp, ffs_flags) 1652 struct mount *mp; 1653 ino_t ino; 1654 int flags; 1655 struct vnode **vpp; 1656 int ffs_flags; 1657 { 1658 struct fs *fs; 1659 struct inode *ip; 1660 struct ufsmount *ump; 1661 struct buf *bp; 1662 struct vnode *vp; 1663 int error; 1664 1665 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 1666 if (error || *vpp != NULL) 1667 return (error); 1668 1669 /* 1670 * We must promote to an exclusive lock for vnode creation. This 1671 * can happen if lookup is passed LOCKSHARED. 1672 */ 1673 if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1674 flags &= ~LK_TYPE_MASK; 1675 flags |= LK_EXCLUSIVE; 1676 } 1677 1678 /* 1679 * We do not lock vnode creation as it is believed to be too 1680 * expensive for such rare case as simultaneous creation of vnode 1681 * for same ino by different processes. We just allow them to race 1682 * and check later to decide who wins. Let the race begin! 1683 */ 1684 1685 ump = VFSTOUFS(mp); 1686 fs = ump->um_fs; 1687 ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO); 1688 1689 /* Allocate a new vnode/inode. */ 1690 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 1691 &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1692 if (error) { 1693 *vpp = NULL; 1694 uma_zfree(uma_inode, ip); 1695 return (error); 1696 } 1697 /* 1698 * FFS supports recursive locking. 1699 */ 1700 lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); 1701 VN_LOCK_AREC(vp); 1702 vp->v_data = ip; 1703 vp->v_bufobj.bo_bsize = fs->fs_bsize; 1704 ip->i_vnode = vp; 1705 ip->i_ump = ump; 1706 ip->i_number = ino; 1707 ip->i_ea_refs = 0; 1708 ip->i_nextclustercg = -1; 1709 ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; 1710 ip->i_mode = 0; /* ensure error cases below throw away vnode */ 1711 #ifdef QUOTA 1712 { 1713 int i; 1714 for (i = 0; i < MAXQUOTAS; i++) 1715 ip->i_dquot[i] = NODQUOT; 1716 } 1717 #endif 1718 1719 if (ffs_flags & FFSV_FORCEINSMQ) 1720 vp->v_vflag |= VV_FORCEINSMQ; 1721 error = insmntque(vp, mp); 1722 if (error != 0) { 1723 uma_zfree(uma_inode, ip); 1724 *vpp = NULL; 1725 return (error); 1726 } 1727 vp->v_vflag &= ~VV_FORCEINSMQ; 1728 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 1729 if (error || *vpp != NULL) 1730 return (error); 1731 1732 /* Read in the disk contents for the inode, copy into the inode. */ 1733 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), 1734 (int)fs->fs_bsize, NOCRED, &bp); 1735 if (error) { 1736 /* 1737 * The inode does not contain anything useful, so it would 1738 * be misleading to leave it on its hash chain. With mode 1739 * still zero, it will be unlinked and returned to the free 1740 * list by vput(). 1741 */ 1742 brelse(bp); 1743 vput(vp); 1744 *vpp = NULL; 1745 return (error); 1746 } 1747 if (I_IS_UFS1(ip)) 1748 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 1749 else 1750 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1751 if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) { 1752 bqrelse(bp); 1753 vput(vp); 1754 *vpp = NULL; 1755 return (error); 1756 } 1757 if (DOINGSOFTDEP(vp)) 1758 softdep_load_inodeblock(ip); 1759 else 1760 ip->i_effnlink = ip->i_nlink; 1761 bqrelse(bp); 1762 1763 /* 1764 * Initialize the vnode from the inode, check for aliases. 1765 * Note that the underlying vnode may have changed. 1766 */ 1767 error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, 1768 &vp); 1769 if (error) { 1770 vput(vp); 1771 *vpp = NULL; 1772 return (error); 1773 } 1774 1775 /* 1776 * Finish inode initialization. 1777 */ 1778 if (vp->v_type != VFIFO) { 1779 /* FFS supports shared locking for all files except fifos. */ 1780 VN_LOCK_ASHARE(vp); 1781 } 1782 1783 /* 1784 * Set up a generation number for this inode if it does not 1785 * already have one. This should only happen on old filesystems. 1786 */ 1787 if (ip->i_gen == 0) { 1788 while (ip->i_gen == 0) 1789 ip->i_gen = arc4random(); 1790 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 1791 ip->i_flag |= IN_MODIFIED; 1792 DIP_SET(ip, i_gen, ip->i_gen); 1793 } 1794 } 1795 #ifdef MAC 1796 if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) { 1797 /* 1798 * If this vnode is already allocated, and we're running 1799 * multi-label, attempt to perform a label association 1800 * from the extended attributes on the inode. 1801 */ 1802 error = mac_vnode_associate_extattr(mp, vp); 1803 if (error) { 1804 /* ufs_inactive will release ip->i_devvp ref. */ 1805 vput(vp); 1806 *vpp = NULL; 1807 return (error); 1808 } 1809 } 1810 #endif 1811 1812 *vpp = vp; 1813 return (0); 1814 } 1815 1816 /* 1817 * File handle to vnode 1818 * 1819 * Have to be really careful about stale file handles: 1820 * - check that the inode number is valid 1821 * - for UFS2 check that the inode number is initialized 1822 * - call ffs_vget() to get the locked inode 1823 * - check for an unallocated inode (i_mode == 0) 1824 * - check that the given client host has export rights and return 1825 * those rights via. exflagsp and credanonp 1826 */ 1827 static int 1828 ffs_fhtovp(mp, fhp, flags, vpp) 1829 struct mount *mp; 1830 struct fid *fhp; 1831 int flags; 1832 struct vnode **vpp; 1833 { 1834 struct ufid *ufhp; 1835 struct ufsmount *ump; 1836 struct fs *fs; 1837 struct cg *cgp; 1838 struct buf *bp; 1839 ino_t ino; 1840 u_int cg; 1841 int error; 1842 1843 ufhp = (struct ufid *)fhp; 1844 ino = ufhp->ufid_ino; 1845 ump = VFSTOUFS(mp); 1846 fs = ump->um_fs; 1847 if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) 1848 return (ESTALE); 1849 /* 1850 * Need to check if inode is initialized because UFS2 does lazy 1851 * initialization and nfs_fhtovp can offer arbitrary inode numbers. 1852 */ 1853 if (fs->fs_magic != FS_UFS2_MAGIC) 1854 return (ufs_fhtovp(mp, ufhp, flags, vpp)); 1855 cg = ino_to_cg(fs, ino); 1856 if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) 1857 return (error); 1858 if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) { 1859 brelse(bp); 1860 return (ESTALE); 1861 } 1862 brelse(bp); 1863 return (ufs_fhtovp(mp, ufhp, flags, vpp)); 1864 } 1865 1866 /* 1867 * Initialize the filesystem. 1868 */ 1869 static int 1870 ffs_init(vfsp) 1871 struct vfsconf *vfsp; 1872 { 1873 1874 ffs_susp_initialize(); 1875 softdep_initialize(); 1876 return (ufs_init(vfsp)); 1877 } 1878 1879 /* 1880 * Undo the work of ffs_init(). 1881 */ 1882 static int 1883 ffs_uninit(vfsp) 1884 struct vfsconf *vfsp; 1885 { 1886 int ret; 1887 1888 ret = ufs_uninit(vfsp); 1889 softdep_uninitialize(); 1890 ffs_susp_uninitialize(); 1891 return (ret); 1892 } 1893 1894 /* 1895 * Structure used to pass information from ffs_sbupdate to its 1896 * helper routine ffs_use_bwrite. 1897 */ 1898 struct devfd { 1899 struct ufsmount *ump; 1900 struct buf *sbbp; 1901 int waitfor; 1902 int suspended; 1903 int error; 1904 }; 1905 1906 /* 1907 * Write a superblock and associated information back to disk. 1908 */ 1909 int 1910 ffs_sbupdate(ump, waitfor, suspended) 1911 struct ufsmount *ump; 1912 int waitfor; 1913 int suspended; 1914 { 1915 struct fs *fs; 1916 struct buf *sbbp; 1917 struct devfd devfd; 1918 1919 fs = ump->um_fs; 1920 if (fs->fs_ronly == 1 && 1921 (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) != 1922 (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0) 1923 panic("ffs_sbupdate: write read-only filesystem"); 1924 /* 1925 * We use the superblock's buf to serialize calls to ffs_sbupdate(). 1926 */ 1927 sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 1928 (int)fs->fs_sbsize, 0, 0, 0); 1929 /* 1930 * Initialize info needed for write function. 1931 */ 1932 devfd.ump = ump; 1933 devfd.sbbp = sbbp; 1934 devfd.waitfor = waitfor; 1935 devfd.suspended = suspended; 1936 devfd.error = 0; 1937 return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite)); 1938 } 1939 1940 /* 1941 * Write function for use by filesystem-layer routines. 1942 */ 1943 static int 1944 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size) 1945 { 1946 struct devfd *devfdp; 1947 struct ufsmount *ump; 1948 struct buf *bp; 1949 struct fs *fs; 1950 int error; 1951 1952 devfdp = devfd; 1953 ump = devfdp->ump; 1954 fs = ump->um_fs; 1955 /* 1956 * Writing the superblock summary information. 1957 */ 1958 if (loc != fs->fs_sblockloc) { 1959 bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0); 1960 bcopy(buf, bp->b_data, (u_int)size); 1961 if (devfdp->suspended) 1962 bp->b_flags |= B_VALIDSUSPWRT; 1963 if (devfdp->waitfor != MNT_WAIT) 1964 bawrite(bp); 1965 else if ((error = bwrite(bp)) != 0) 1966 devfdp->error = error; 1967 return (0); 1968 } 1969 /* 1970 * Writing the superblock itself. We need to do special checks for it. 1971 */ 1972 bp = devfdp->sbbp; 1973 if (devfdp->error != 0) { 1974 brelse(bp); 1975 return (devfdp->error); 1976 } 1977 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 && 1978 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1979 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 1980 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1); 1981 fs->fs_sblockloc = SBLOCK_UFS1; 1982 } 1983 if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 && 1984 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1985 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 1986 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2); 1987 fs->fs_sblockloc = SBLOCK_UFS2; 1988 } 1989 if (MOUNTEDSOFTDEP(ump->um_mountp)) 1990 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp); 1991 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 1992 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 1993 if (devfdp->suspended) 1994 bp->b_flags |= B_VALIDSUSPWRT; 1995 if (devfdp->waitfor != MNT_WAIT) 1996 bawrite(bp); 1997 else if ((error = bwrite(bp)) != 0) 1998 devfdp->error = error; 1999 return (devfdp->error); 2000 } 2001 2002 static int 2003 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, 2004 int attrnamespace, const char *attrname) 2005 { 2006 2007 #ifdef UFS_EXTATTR 2008 return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace, 2009 attrname)); 2010 #else 2011 return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, 2012 attrname)); 2013 #endif 2014 } 2015 2016 static void 2017 ffs_ifree(struct ufsmount *ump, struct inode *ip) 2018 { 2019 2020 if (ump->um_fstype == UFS1 && ip->i_din1 != NULL) 2021 uma_zfree(uma_ufs1, ip->i_din1); 2022 else if (ip->i_din2 != NULL) 2023 uma_zfree(uma_ufs2, ip->i_din2); 2024 uma_zfree(uma_inode, ip); 2025 } 2026 2027 static int dobkgrdwrite = 1; 2028 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0, 2029 "Do background writes (honoring the BV_BKGRDWRITE flag)?"); 2030 2031 /* 2032 * Complete a background write started from bwrite. 2033 */ 2034 static void 2035 ffs_backgroundwritedone(struct buf *bp) 2036 { 2037 struct bufobj *bufobj; 2038 struct buf *origbp; 2039 2040 /* 2041 * Find the original buffer that we are writing. 2042 */ 2043 bufobj = bp->b_bufobj; 2044 BO_LOCK(bufobj); 2045 if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL) 2046 panic("backgroundwritedone: lost buffer"); 2047 2048 /* 2049 * We should mark the cylinder group buffer origbp as 2050 * dirty, to not loose the failed write. 2051 */ 2052 if ((bp->b_ioflags & BIO_ERROR) != 0) 2053 origbp->b_vflags |= BV_BKGRDERR; 2054 BO_UNLOCK(bufobj); 2055 /* 2056 * Process dependencies then return any unfinished ones. 2057 */ 2058 if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0) 2059 buf_complete(bp); 2060 #ifdef SOFTUPDATES 2061 if (!LIST_EMPTY(&bp->b_dep)) 2062 softdep_move_dependencies(bp, origbp); 2063 #endif 2064 /* 2065 * This buffer is marked B_NOCACHE so when it is released 2066 * by biodone it will be tossed. 2067 */ 2068 bp->b_flags |= B_NOCACHE; 2069 bp->b_flags &= ~B_CACHE; 2070 pbrelvp(bp); 2071 2072 /* 2073 * Prevent brelse() from trying to keep and re-dirtying bp on 2074 * errors. It causes b_bufobj dereference in 2075 * bdirty()/reassignbuf(), and b_bufobj was cleared in 2076 * pbrelvp() above. 2077 */ 2078 if ((bp->b_ioflags & BIO_ERROR) != 0) 2079 bp->b_flags |= B_INVAL; 2080 bufdone(bp); 2081 BO_LOCK(bufobj); 2082 /* 2083 * Clear the BV_BKGRDINPROG flag in the original buffer 2084 * and awaken it if it is waiting for the write to complete. 2085 * If BV_BKGRDINPROG is not set in the original buffer it must 2086 * have been released and re-instantiated - which is not legal. 2087 */ 2088 KASSERT((origbp->b_vflags & BV_BKGRDINPROG), 2089 ("backgroundwritedone: lost buffer2")); 2090 origbp->b_vflags &= ~BV_BKGRDINPROG; 2091 if (origbp->b_vflags & BV_BKGRDWAIT) { 2092 origbp->b_vflags &= ~BV_BKGRDWAIT; 2093 wakeup(&origbp->b_xflags); 2094 } 2095 BO_UNLOCK(bufobj); 2096 } 2097 2098 2099 /* 2100 * Write, release buffer on completion. (Done by iodone 2101 * if async). Do not bother writing anything if the buffer 2102 * is invalid. 2103 * 2104 * Note that we set B_CACHE here, indicating that buffer is 2105 * fully valid and thus cacheable. This is true even of NFS 2106 * now so we set it generally. This could be set either here 2107 * or in biodone() since the I/O is synchronous. We put it 2108 * here. 2109 */ 2110 static int 2111 ffs_bufwrite(struct buf *bp) 2112 { 2113 struct buf *newbp; 2114 struct cg *cgp; 2115 2116 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2117 if (bp->b_flags & B_INVAL) { 2118 brelse(bp); 2119 return (0); 2120 } 2121 2122 if (!BUF_ISLOCKED(bp)) 2123 panic("bufwrite: buffer is not busy???"); 2124 /* 2125 * If a background write is already in progress, delay 2126 * writing this block if it is asynchronous. Otherwise 2127 * wait for the background write to complete. 2128 */ 2129 BO_LOCK(bp->b_bufobj); 2130 if (bp->b_vflags & BV_BKGRDINPROG) { 2131 if (bp->b_flags & B_ASYNC) { 2132 BO_UNLOCK(bp->b_bufobj); 2133 bdwrite(bp); 2134 return (0); 2135 } 2136 bp->b_vflags |= BV_BKGRDWAIT; 2137 msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO, 2138 "bwrbg", 0); 2139 if (bp->b_vflags & BV_BKGRDINPROG) 2140 panic("bufwrite: still writing"); 2141 } 2142 bp->b_vflags &= ~BV_BKGRDERR; 2143 BO_UNLOCK(bp->b_bufobj); 2144 2145 /* 2146 * If this buffer is marked for background writing and we 2147 * do not have to wait for it, make a copy and write the 2148 * copy so as to leave this buffer ready for further use. 2149 * 2150 * This optimization eats a lot of memory. If we have a page 2151 * or buffer shortfall we can't do it. 2152 */ 2153 if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && 2154 (bp->b_flags & B_ASYNC) && 2155 !vm_page_count_severe() && 2156 !buf_dirty_count_severe()) { 2157 KASSERT(bp->b_iodone == NULL, 2158 ("bufwrite: needs chained iodone (%p)", bp->b_iodone)); 2159 2160 /* get a new block */ 2161 newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD); 2162 if (newbp == NULL) 2163 goto normal_write; 2164 2165 KASSERT(buf_mapped(bp), ("Unmapped cg")); 2166 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize); 2167 BO_LOCK(bp->b_bufobj); 2168 bp->b_vflags |= BV_BKGRDINPROG; 2169 BO_UNLOCK(bp->b_bufobj); 2170 newbp->b_xflags |= 2171 (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER; 2172 newbp->b_lblkno = bp->b_lblkno; 2173 newbp->b_blkno = bp->b_blkno; 2174 newbp->b_offset = bp->b_offset; 2175 newbp->b_iodone = ffs_backgroundwritedone; 2176 newbp->b_flags |= B_ASYNC; 2177 newbp->b_flags &= ~B_INVAL; 2178 pbgetvp(bp->b_vp, newbp); 2179 2180 #ifdef SOFTUPDATES 2181 /* 2182 * Move over the dependencies. If there are rollbacks, 2183 * leave the parent buffer dirtied as it will need to 2184 * be written again. 2185 */ 2186 if (LIST_EMPTY(&bp->b_dep) || 2187 softdep_move_dependencies(bp, newbp) == 0) 2188 bundirty(bp); 2189 #else 2190 bundirty(bp); 2191 #endif 2192 2193 /* 2194 * Initiate write on the copy, release the original. The 2195 * BKGRDINPROG flag prevents it from going away until 2196 * the background write completes. We have to recalculate 2197 * its check hash in case the buffer gets freed and then 2198 * reconstituted from the buffer cache during a later read. 2199 */ 2200 if ((bp->b_xflags & BX_CYLGRP) != 0) { 2201 cgp = (struct cg *)bp->b_data; 2202 cgp->cg_ckhash = 0; 2203 cgp->cg_ckhash = 2204 calculate_crc32c(~0L, bp->b_data, bp->b_bcount); 2205 } 2206 bqrelse(bp); 2207 bp = newbp; 2208 } else 2209 /* Mark the buffer clean */ 2210 bundirty(bp); 2211 2212 2213 /* Let the normal bufwrite do the rest for us */ 2214 normal_write: 2215 /* 2216 * If we are writing a cylinder group, update its time. 2217 */ 2218 if ((bp->b_xflags & BX_CYLGRP) != 0) { 2219 cgp = (struct cg *)bp->b_data; 2220 cgp->cg_old_time = cgp->cg_time = time_second; 2221 } 2222 return (bufwrite(bp)); 2223 } 2224 2225 2226 static void 2227 ffs_geom_strategy(struct bufobj *bo, struct buf *bp) 2228 { 2229 struct vnode *vp; 2230 struct buf *tbp; 2231 int error, nocopy; 2232 2233 vp = bo2vnode(bo); 2234 if (bp->b_iocmd == BIO_WRITE) { 2235 if ((bp->b_flags & B_VALIDSUSPWRT) == 0 && 2236 bp->b_vp != NULL && bp->b_vp->v_mount != NULL && 2237 (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0) 2238 panic("ffs_geom_strategy: bad I/O"); 2239 nocopy = bp->b_flags & B_NOCOPY; 2240 bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY); 2241 if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 && 2242 vp->v_rdev->si_snapdata != NULL) { 2243 if ((bp->b_flags & B_CLUSTER) != 0) { 2244 runningbufwakeup(bp); 2245 TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, 2246 b_cluster.cluster_entry) { 2247 error = ffs_copyonwrite(vp, tbp); 2248 if (error != 0 && 2249 error != EOPNOTSUPP) { 2250 bp->b_error = error; 2251 bp->b_ioflags |= BIO_ERROR; 2252 bufdone(bp); 2253 return; 2254 } 2255 } 2256 bp->b_runningbufspace = bp->b_bufsize; 2257 atomic_add_long(&runningbufspace, 2258 bp->b_runningbufspace); 2259 } else { 2260 error = ffs_copyonwrite(vp, bp); 2261 if (error != 0 && error != EOPNOTSUPP) { 2262 bp->b_error = error; 2263 bp->b_ioflags |= BIO_ERROR; 2264 bufdone(bp); 2265 return; 2266 } 2267 } 2268 } 2269 #ifdef SOFTUPDATES 2270 if ((bp->b_flags & B_CLUSTER) != 0) { 2271 TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, 2272 b_cluster.cluster_entry) { 2273 if (!LIST_EMPTY(&tbp->b_dep)) 2274 buf_start(tbp); 2275 } 2276 } else { 2277 if (!LIST_EMPTY(&bp->b_dep)) 2278 buf_start(bp); 2279 } 2280 2281 #endif 2282 /* 2283 * Check for metadata that needs check-hashes and update them. 2284 */ 2285 switch (bp->b_xflags & BX_FSPRIV) { 2286 case BX_CYLGRP: 2287 ((struct cg *)bp->b_data)->cg_ckhash = 0; 2288 ((struct cg *)bp->b_data)->cg_ckhash = 2289 calculate_crc32c(~0L, bp->b_data, bp->b_bcount); 2290 break; 2291 2292 case BX_SUPERBLOCK: 2293 case BX_INODE: 2294 case BX_INDIR: 2295 case BX_DIR: 2296 printf("Check-hash write is unimplemented!!!\n"); 2297 break; 2298 2299 case 0: 2300 break; 2301 2302 default: 2303 printf("multiple buffer types 0x%b\n", 2304 (u_int)(bp->b_xflags & BX_FSPRIV), 2305 PRINT_UFS_BUF_XFLAGS); 2306 break; 2307 } 2308 } 2309 g_vfs_strategy(bo, bp); 2310 } 2311 2312 int 2313 ffs_own_mount(const struct mount *mp) 2314 { 2315 2316 if (mp->mnt_op == &ufs_vfsops) 2317 return (1); 2318 return (0); 2319 } 2320 2321 #ifdef DDB 2322 #ifdef SOFTUPDATES 2323 2324 /* defined in ffs_softdep.c */ 2325 extern void db_print_ffs(struct ufsmount *ump); 2326 2327 DB_SHOW_COMMAND(ffs, db_show_ffs) 2328 { 2329 struct mount *mp; 2330 struct ufsmount *ump; 2331 2332 if (have_addr) { 2333 ump = VFSTOUFS((struct mount *)addr); 2334 db_print_ffs(ump); 2335 return; 2336 } 2337 2338 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 2339 if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name)) 2340 db_print_ffs(VFSTOUFS(mp)); 2341 } 2342 } 2343 2344 #endif /* SOFTUPDATES */ 2345 #endif /* DDB */ 2346