1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1991, 1993, 1994 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_quota.h" 38 #include "opt_ufs.h" 39 #include "opt_ffs.h" 40 #include "opt_ddb.h" 41 42 #include <sys/param.h> 43 #include <sys/gsb_crc32.h> 44 #include <sys/systm.h> 45 #include <sys/namei.h> 46 #include <sys/priv.h> 47 #include <sys/proc.h> 48 #include <sys/taskqueue.h> 49 #include <sys/kernel.h> 50 #include <sys/ktr.h> 51 #include <sys/vnode.h> 52 #include <sys/mount.h> 53 #include <sys/bio.h> 54 #include <sys/buf.h> 55 #include <sys/conf.h> 56 #include <sys/fcntl.h> 57 #include <sys/ioccom.h> 58 #include <sys/malloc.h> 59 #include <sys/mutex.h> 60 #include <sys/rwlock.h> 61 #include <sys/sysctl.h> 62 #include <sys/vmmeter.h> 63 64 #include <security/mac/mac_framework.h> 65 66 #include <ufs/ufs/dir.h> 67 #include <ufs/ufs/extattr.h> 68 #include <ufs/ufs/gjournal.h> 69 #include <ufs/ufs/quota.h> 70 #include <ufs/ufs/ufsmount.h> 71 #include <ufs/ufs/inode.h> 72 #include <ufs/ufs/ufs_extern.h> 73 74 #include <ufs/ffs/fs.h> 75 #include <ufs/ffs/ffs_extern.h> 76 77 #include <vm/vm.h> 78 #include <vm/uma.h> 79 #include <vm/vm_page.h> 80 81 #include <geom/geom.h> 82 #include <geom/geom_vfs.h> 83 84 #include <ddb/ddb.h> 85 86 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2; 87 VFS_SMR_DECLARE; 88 89 static int ffs_mountfs(struct vnode *, struct mount *, struct thread *); 90 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, 91 ufs2_daddr_t); 92 static void ffs_ifree(struct ufsmount *ump, struct inode *ip); 93 static int ffs_sync_lazy(struct mount *mp); 94 static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size); 95 static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size); 96 97 static vfs_init_t ffs_init; 98 static vfs_uninit_t ffs_uninit; 99 static vfs_extattrctl_t 
ffs_extattrctl; 100 static vfs_cmount_t ffs_cmount; 101 static vfs_unmount_t ffs_unmount; 102 static vfs_mount_t ffs_mount; 103 static vfs_statfs_t ffs_statfs; 104 static vfs_fhtovp_t ffs_fhtovp; 105 static vfs_sync_t ffs_sync; 106 107 static struct vfsops ufs_vfsops = { 108 .vfs_extattrctl = ffs_extattrctl, 109 .vfs_fhtovp = ffs_fhtovp, 110 .vfs_init = ffs_init, 111 .vfs_mount = ffs_mount, 112 .vfs_cmount = ffs_cmount, 113 .vfs_quotactl = ufs_quotactl, 114 .vfs_root = vfs_cache_root, 115 .vfs_cachedroot = ufs_root, 116 .vfs_statfs = ffs_statfs, 117 .vfs_sync = ffs_sync, 118 .vfs_uninit = ffs_uninit, 119 .vfs_unmount = ffs_unmount, 120 .vfs_vget = ffs_vget, 121 .vfs_susp_clean = process_deferred_inactive, 122 }; 123 124 VFS_SET(ufs_vfsops, ufs, 0); 125 MODULE_VERSION(ufs, 1); 126 127 static b_strategy_t ffs_geom_strategy; 128 static b_write_t ffs_bufwrite; 129 130 static struct buf_ops ffs_ops = { 131 .bop_name = "FFS", 132 .bop_write = ffs_bufwrite, 133 .bop_strategy = ffs_geom_strategy, 134 .bop_sync = bufsync, 135 #ifdef NO_FFS_SNAPSHOT 136 .bop_bdflush = bufbdflush, 137 #else 138 .bop_bdflush = ffs_bdflush, 139 #endif 140 }; 141 142 /* 143 * Note that userquota and groupquota options are not currently used 144 * by UFS/FFS code and generally mount(8) does not pass those options 145 * from userland, but they can be passed by loader(8) via 146 * vfs.root.mountfrom.options. 147 */ 148 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr", 149 "noclusterw", "noexec", "export", "force", "from", "groupquota", 150 "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir", 151 "nosymfollow", "sync", "union", "userquota", "untrusted", NULL }; 152 153 static int ffs_enxio_enable = 1; 154 SYSCTL_DECL(_vfs_ffs); 155 SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN, 156 &ffs_enxio_enable, 0, 157 "enable mapping of other disk I/O errors to ENXIO"); 158 159 /* 160 * Return buffer with the contents of block "offset" from the beginning of 161 * directory "ip". If "res" is non-zero, fill it in with a pointer to the 162 * remaining space in the directory. 163 */ 164 static int 165 ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp) 166 { 167 struct inode *ip; 168 struct fs *fs; 169 struct buf *bp; 170 ufs_lbn_t lbn; 171 int bsize, error; 172 173 ip = VTOI(vp); 174 fs = ITOFS(ip); 175 lbn = lblkno(fs, offset); 176 bsize = blksize(fs, ip, lbn); 177 178 *bpp = NULL; 179 error = bread(vp, lbn, bsize, NOCRED, &bp); 180 if (error) { 181 return (error); 182 } 183 if (res) 184 *res = (char *)bp->b_data + blkoff(fs, offset); 185 *bpp = bp; 186 return (0); 187 } 188 189 /* 190 * Load up the contents of an inode and copy the appropriate pieces 191 * to the incore copy. 
192 */ 193 static int 194 ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino) 195 { 196 struct ufs1_dinode *dip1; 197 struct ufs2_dinode *dip2; 198 int error; 199 200 if (I_IS_UFS1(ip)) { 201 dip1 = ip->i_din1; 202 *dip1 = 203 *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); 204 ip->i_mode = dip1->di_mode; 205 ip->i_nlink = dip1->di_nlink; 206 ip->i_effnlink = dip1->di_nlink; 207 ip->i_size = dip1->di_size; 208 ip->i_flags = dip1->di_flags; 209 ip->i_gen = dip1->di_gen; 210 ip->i_uid = dip1->di_uid; 211 ip->i_gid = dip1->di_gid; 212 return (0); 213 } 214 dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); 215 if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 && 216 !ffs_fsfail_cleanup(ITOUMP(ip), error)) { 217 printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt, 218 (intmax_t)ino); 219 return (error); 220 } 221 *ip->i_din2 = *dip2; 222 dip2 = ip->i_din2; 223 ip->i_mode = dip2->di_mode; 224 ip->i_nlink = dip2->di_nlink; 225 ip->i_effnlink = dip2->di_nlink; 226 ip->i_size = dip2->di_size; 227 ip->i_flags = dip2->di_flags; 228 ip->i_gen = dip2->di_gen; 229 ip->i_uid = dip2->di_uid; 230 ip->i_gid = dip2->di_gid; 231 return (0); 232 } 233 234 /* 235 * Verify that a filesystem block number is a valid data block. 236 * This routine is only called on untrusted filesystems. 237 */ 238 static int 239 ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize) 240 { 241 struct fs *fs; 242 struct ufsmount *ump; 243 ufs2_daddr_t end_daddr; 244 int cg, havemtx; 245 246 KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0, 247 ("ffs_check_blkno called on a trusted file system")); 248 ump = VFSTOUFS(mp); 249 fs = ump->um_fs; 250 cg = dtog(fs, daddr); 251 end_daddr = daddr + numfrags(fs, blksize); 252 /* 253 * Verify that the block number is a valid data block. Also check 254 * that it does not point to an inode block or a superblock. Accept 255 * blocks that are unalloacted (0) or part of snapshot metadata 256 * (BLK_NOCOPY or BLK_SNAP). 257 * 258 * Thus, the block must be in a valid range for the filesystem and 259 * either in the space before a backup superblock (except the first 260 * cylinder group where that space is used by the bootstrap code) or 261 * after the inode blocks and before the end of the cylinder group. 262 */ 263 if ((uint64_t)daddr <= BLK_SNAP || 264 ((uint64_t)end_daddr <= fs->fs_size && 265 ((cg > 0 && end_daddr <= cgsblock(fs, cg)) || 266 (daddr >= cgdmin(fs, cg) && 267 end_daddr <= cgbase(fs, cg) + fs->fs_fpg)))) 268 return (0); 269 if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0) 270 UFS_LOCK(ump); 271 if (ppsratecheck(&ump->um_last_integritymsg, 272 &ump->um_secs_integritymsg, 1)) { 273 UFS_UNLOCK(ump); 274 uprintf("\n%s: inode %jd, out-of-range indirect block " 275 "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr); 276 if (havemtx) 277 UFS_LOCK(ump); 278 } else if (!havemtx) 279 UFS_UNLOCK(ump); 280 return (EINTEGRITY); 281 } 282 283 /* 284 * On first ENXIO error, initiate an asynchronous forcible unmount. 285 * Used to unmount filesystems whose underlying media has gone away. 286 * 287 * Return true if a cleanup is in progress. 
288 */ 289 int 290 ffs_fsfail_cleanup(struct ufsmount *ump, int error) 291 { 292 int retval; 293 294 UFS_LOCK(ump); 295 retval = ffs_fsfail_cleanup_locked(ump, error); 296 UFS_UNLOCK(ump); 297 return (retval); 298 } 299 300 int 301 ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error) 302 { 303 mtx_assert(UFS_MTX(ump), MA_OWNED); 304 if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) { 305 ump->um_flags |= UM_FSFAIL_CLEANUP; 306 /* 307 * Queue an async forced unmount. 308 */ 309 vfs_ref(ump->um_mountp); 310 dounmount(ump->um_mountp, 311 MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread); 312 printf("UFS: forcibly unmounting %s from %s\n", 313 ump->um_mountp->mnt_stat.f_mntfromname, 314 ump->um_mountp->mnt_stat.f_mntonname); 315 } 316 return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0); 317 } 318 319 /* 320 * Wrapper used during ENXIO cleanup to allocate empty buffers when 321 * the kernel is unable to read the real one. They are needed so that 322 * the soft updates code can use them to unwind its dependencies. 323 */ 324 int 325 ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno, 326 daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt, 327 struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *), 328 struct buf **bpp) 329 { 330 int error; 331 332 flags |= GB_CVTENXIO; 333 error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt, 334 cred, flags, ckhashfunc, bpp); 335 if (error != 0 && ffs_fsfail_cleanup(ump, error)) { 336 error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp); 337 KASSERT(error == 0, ("getblkx failed")); 338 vfs_bio_bzero_buf(*bpp, 0, size); 339 } 340 return (error); 341 } 342 343 static int 344 ffs_mount(struct mount *mp) 345 { 346 struct vnode *devvp, *odevvp; 347 struct thread *td; 348 struct ufsmount *ump = NULL; 349 struct fs *fs; 350 int error, flags; 351 int error1 __diagused; 352 uint64_t mntorflags, saved_mnt_flag; 353 accmode_t accmode; 354 struct nameidata ndp; 355 char *fspec; 356 bool mounted_softdep; 357 358 td = curthread; 359 if (vfs_filteropt(mp->mnt_optnew, ffs_opts)) 360 return (EINVAL); 361 if (uma_inode == NULL) { 362 uma_inode = uma_zcreate("FFS inode", 363 sizeof(struct inode), NULL, NULL, NULL, NULL, 364 UMA_ALIGN_PTR, 0); 365 uma_ufs1 = uma_zcreate("FFS1 dinode", 366 sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL, 367 UMA_ALIGN_PTR, 0); 368 uma_ufs2 = uma_zcreate("FFS2 dinode", 369 sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL, 370 UMA_ALIGN_PTR, 0); 371 VFS_SMR_ZONE_SET(uma_inode); 372 } 373 374 vfs_deleteopt(mp->mnt_optnew, "groupquota"); 375 vfs_deleteopt(mp->mnt_optnew, "userquota"); 376 377 fspec = vfs_getopts(mp->mnt_optnew, "from", &error); 378 if (error) 379 return (error); 380 381 mntorflags = 0; 382 if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0) 383 mntorflags |= MNT_UNTRUSTED; 384 385 if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0) 386 mntorflags |= MNT_ACLS; 387 388 if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) { 389 mntorflags |= MNT_SNAPSHOT; 390 /* 391 * Once we have set the MNT_SNAPSHOT flag, do not 392 * persist "snapshot" in the options list. 
393 */ 394 vfs_deleteopt(mp->mnt_optnew, "snapshot"); 395 vfs_deleteopt(mp->mnt_opt, "snapshot"); 396 } 397 398 if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) { 399 if (mntorflags & MNT_ACLS) { 400 vfs_mount_error(mp, 401 "\"acls\" and \"nfsv4acls\" options " 402 "are mutually exclusive"); 403 return (EINVAL); 404 } 405 mntorflags |= MNT_NFS4ACLS; 406 } 407 408 MNT_ILOCK(mp); 409 mp->mnt_kern_flag &= ~MNTK_FPLOOKUP; 410 mp->mnt_flag |= mntorflags; 411 MNT_IUNLOCK(mp); 412 413 /* 414 * If this is a snapshot request, take the snapshot. 415 */ 416 if (mp->mnt_flag & MNT_SNAPSHOT) 417 return (ffs_snapshot(mp, fspec)); 418 419 /* 420 * Must not call namei() while owning busy ref. 421 */ 422 if (mp->mnt_flag & MNT_UPDATE) 423 vfs_unbusy(mp); 424 425 /* 426 * Not an update, or updating the name: look up the name 427 * and verify that it refers to a sensible disk device. 428 */ 429 NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec); 430 error = namei(&ndp); 431 if ((mp->mnt_flag & MNT_UPDATE) != 0) { 432 /* 433 * Unmount does not start if MNT_UPDATE is set. Mount 434 * update busies mp before setting MNT_UPDATE. We 435 * must be able to retain our busy ref successfully, 436 * without sleep. 437 */ 438 error1 = vfs_busy(mp, MBF_NOWAIT); 439 MPASS(error1 == 0); 440 } 441 if (error != 0) 442 return (error); 443 NDFREE_PNBUF(&ndp); 444 if (!vn_isdisk_error(ndp.ni_vp, &error)) { 445 vput(ndp.ni_vp); 446 return (error); 447 } 448 449 /* 450 * If mount by non-root, then verify that user has necessary 451 * permissions on the device. 452 */ 453 accmode = VREAD; 454 if ((mp->mnt_flag & MNT_RDONLY) == 0) 455 accmode |= VWRITE; 456 error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td); 457 if (error) 458 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 459 if (error) { 460 vput(ndp.ni_vp); 461 return (error); 462 } 463 464 /* 465 * New mount 466 * 467 * We need the name for the mount point (also used for 468 * "last mounted on") copied in. If an error occurs, 469 * the mount point is discarded by the upper level code. 470 * Note that vfs_mount_alloc() populates f_mntonname for us. 471 */ 472 if ((mp->mnt_flag & MNT_UPDATE) == 0) { 473 if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) { 474 vrele(ndp.ni_vp); 475 return (error); 476 } 477 } else { 478 /* 479 * When updating, check whether changing from read-only to 480 * read/write; if there is no device name, that's all we do. 481 */ 482 ump = VFSTOUFS(mp); 483 fs = ump->um_fs; 484 odevvp = ump->um_odevvp; 485 devvp = ump->um_devvp; 486 487 /* 488 * If it's not the same vnode, or at least the same device 489 * then it's not correct. 490 */ 491 if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev) 492 error = EINVAL; /* needs translation */ 493 vput(ndp.ni_vp); 494 if (error) 495 return (error); 496 if (fs->fs_ronly == 0 && 497 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 498 /* 499 * Flush any dirty data and suspend filesystem. 500 */ 501 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 502 return (error); 503 error = vfs_write_suspend_umnt(mp); 504 if (error != 0) 505 return (error); 506 507 fs->fs_ronly = 1; 508 if (MOUNTEDSOFTDEP(mp)) { 509 MNT_ILOCK(mp); 510 mp->mnt_flag &= ~MNT_SOFTDEP; 511 MNT_IUNLOCK(mp); 512 mounted_softdep = true; 513 } else 514 mounted_softdep = false; 515 516 /* 517 * Check for and optionally get rid of files open 518 * for writing. 
519 */ 520 flags = WRITECLOSE; 521 if (mp->mnt_flag & MNT_FORCE) 522 flags |= FORCECLOSE; 523 if (mounted_softdep) { 524 error = softdep_flushfiles(mp, flags, td); 525 } else { 526 error = ffs_flushfiles(mp, flags, td); 527 } 528 if (error) { 529 fs->fs_ronly = 0; 530 if (mounted_softdep) { 531 MNT_ILOCK(mp); 532 mp->mnt_flag |= MNT_SOFTDEP; 533 MNT_IUNLOCK(mp); 534 } 535 vfs_write_resume(mp, 0); 536 return (error); 537 } 538 539 if (fs->fs_pendingblocks != 0 || 540 fs->fs_pendinginodes != 0) { 541 printf("WARNING: %s Update error: blocks %jd " 542 "files %d\n", fs->fs_fsmnt, 543 (intmax_t)fs->fs_pendingblocks, 544 fs->fs_pendinginodes); 545 fs->fs_pendingblocks = 0; 546 fs->fs_pendinginodes = 0; 547 } 548 if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) 549 fs->fs_clean = 1; 550 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 551 fs->fs_ronly = 0; 552 fs->fs_clean = 0; 553 if (mounted_softdep) { 554 MNT_ILOCK(mp); 555 mp->mnt_flag |= MNT_SOFTDEP; 556 MNT_IUNLOCK(mp); 557 } 558 vfs_write_resume(mp, 0); 559 return (error); 560 } 561 if (mounted_softdep) 562 softdep_unmount(mp); 563 g_topology_lock(); 564 /* 565 * Drop our write and exclusive access. 566 */ 567 g_access(ump->um_cp, 0, -1, -1); 568 g_topology_unlock(); 569 MNT_ILOCK(mp); 570 mp->mnt_flag |= MNT_RDONLY; 571 MNT_IUNLOCK(mp); 572 /* 573 * Allow the writers to note that filesystem 574 * is ro now. 575 */ 576 vfs_write_resume(mp, 0); 577 } 578 if ((mp->mnt_flag & MNT_RELOAD) && 579 (error = ffs_reload(mp, 0)) != 0) 580 return (error); 581 if (fs->fs_ronly && 582 !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 583 /* 584 * If upgrade to read-write by non-root, then verify 585 * that user has necessary permissions on the device. 586 */ 587 vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY); 588 error = VOP_ACCESS(odevvp, VREAD | VWRITE, 589 td->td_ucred, td); 590 if (error) 591 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 592 VOP_UNLOCK(odevvp); 593 if (error) { 594 return (error); 595 } 596 fs->fs_flags &= ~FS_UNCLEAN; 597 if (fs->fs_clean == 0) { 598 fs->fs_flags |= FS_UNCLEAN; 599 if ((mp->mnt_flag & MNT_FORCE) || 600 ((fs->fs_flags & 601 (FS_SUJ | FS_NEEDSFSCK)) == 0 && 602 (fs->fs_flags & FS_DOSOFTDEP))) { 603 printf("WARNING: %s was not properly " 604 "dismounted\n", fs->fs_fsmnt); 605 } else { 606 vfs_mount_error(mp, 607 "R/W mount of %s denied. %s.%s", 608 fs->fs_fsmnt, 609 "Filesystem is not clean - run fsck", 610 (fs->fs_flags & FS_SUJ) == 0 ? "" : 611 " Forced mount will invalidate" 612 " journal contents"); 613 return (EPERM); 614 } 615 } 616 g_topology_lock(); 617 /* 618 * Request exclusive write access. 
619 */ 620 error = g_access(ump->um_cp, 0, 1, 1); 621 g_topology_unlock(); 622 if (error) 623 return (error); 624 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 625 return (error); 626 error = vfs_write_suspend_umnt(mp); 627 if (error != 0) 628 return (error); 629 fs->fs_ronly = 0; 630 MNT_ILOCK(mp); 631 saved_mnt_flag = MNT_RDONLY; 632 if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag & 633 MNT_ASYNC) != 0) 634 saved_mnt_flag |= MNT_ASYNC; 635 mp->mnt_flag &= ~saved_mnt_flag; 636 MNT_IUNLOCK(mp); 637 fs->fs_mtime = time_second; 638 /* check to see if we need to start softdep */ 639 if ((fs->fs_flags & FS_DOSOFTDEP) && 640 (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ 641 fs->fs_ronly = 1; 642 MNT_ILOCK(mp); 643 mp->mnt_flag |= saved_mnt_flag; 644 MNT_IUNLOCK(mp); 645 vfs_write_resume(mp, 0); 646 return (error); 647 } 648 fs->fs_clean = 0; 649 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 650 fs->fs_ronly = 1; 651 if ((fs->fs_flags & FS_DOSOFTDEP) != 0) 652 softdep_unmount(mp); 653 MNT_ILOCK(mp); 654 mp->mnt_flag |= saved_mnt_flag; 655 MNT_IUNLOCK(mp); 656 vfs_write_resume(mp, 0); 657 return (error); 658 } 659 if (fs->fs_snapinum[0] != 0) 660 ffs_snapshot_mount(mp); 661 vfs_write_resume(mp, 0); 662 } 663 /* 664 * Soft updates is incompatible with "async", 665 * so if we are doing softupdates stop the user 666 * from setting the async flag in an update. 667 * Softdep_mount() clears it in an initial mount 668 * or ro->rw remount. 669 */ 670 if (MOUNTEDSOFTDEP(mp)) { 671 /* XXX: Reset too late ? */ 672 MNT_ILOCK(mp); 673 mp->mnt_flag &= ~MNT_ASYNC; 674 MNT_IUNLOCK(mp); 675 } 676 /* 677 * Keep MNT_ACLS flag if it is stored in superblock. 678 */ 679 if ((fs->fs_flags & FS_ACLS) != 0) { 680 /* XXX: Set too late ? */ 681 MNT_ILOCK(mp); 682 mp->mnt_flag |= MNT_ACLS; 683 MNT_IUNLOCK(mp); 684 } 685 686 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 687 /* XXX: Set too late ? */ 688 MNT_ILOCK(mp); 689 mp->mnt_flag |= MNT_NFS4ACLS; 690 MNT_IUNLOCK(mp); 691 } 692 693 } 694 695 MNT_ILOCK(mp); 696 /* 697 * This is racy versus lookup, see ufs_fplookup_vexec for details. 698 */ 699 if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0) 700 panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp); 701 if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0) 702 mp->mnt_kern_flag |= MNTK_FPLOOKUP; 703 MNT_IUNLOCK(mp); 704 705 vfs_mountedfrom(mp, fspec); 706 return (0); 707 } 708 709 /* 710 * Compatibility with old mount system call. 711 */ 712 713 static int 714 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) 715 { 716 struct ufs_args args; 717 int error; 718 719 if (data == NULL) 720 return (EINVAL); 721 error = copyin(data, &args, sizeof args); 722 if (error) 723 return (error); 724 725 ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); 726 ma = mount_arg(ma, "export", &args.export, sizeof(args.export)); 727 error = kernel_mount(ma, flags); 728 729 return (error); 730 } 731 732 /* 733 * Reload all incore data for a filesystem (used after running fsck on 734 * the root filesystem and finding things to fix). If the 'force' flag 735 * is 0, the filesystem must be mounted read-only. 736 * 737 * Things to do to update the mount: 738 * 1) invalidate all cached meta-data. 739 * 2) re-read superblock from disk. 740 * 3) re-read summary information from disk. 741 * 4) invalidate all inactive vnodes. 742 * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary 743 * writers, if requested. 744 * 6) invalidate all cached file data. 
745 * 7) re-read inode data for all active vnodes. 746 */ 747 int 748 ffs_reload(struct mount *mp, int flags) 749 { 750 struct vnode *vp, *mvp, *devvp; 751 struct inode *ip; 752 void *space; 753 struct buf *bp; 754 struct fs *fs, *newfs; 755 struct ufsmount *ump; 756 ufs2_daddr_t sblockloc; 757 int i, blks, error; 758 u_long size; 759 int32_t *lp; 760 761 ump = VFSTOUFS(mp); 762 763 MNT_ILOCK(mp); 764 if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) { 765 MNT_IUNLOCK(mp); 766 return (EINVAL); 767 } 768 MNT_IUNLOCK(mp); 769 770 /* 771 * Step 1: invalidate all cached meta-data. 772 */ 773 devvp = VFSTOUFS(mp)->um_devvp; 774 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 775 if (vinvalbuf(devvp, 0, 0, 0) != 0) 776 panic("ffs_reload: dirty1"); 777 VOP_UNLOCK(devvp); 778 779 /* 780 * Step 2: re-read superblock from disk. 781 */ 782 fs = VFSTOUFS(mp)->um_fs; 783 if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, 784 NOCRED, &bp)) != 0) 785 return (error); 786 newfs = (struct fs *)bp->b_data; 787 if ((newfs->fs_magic != FS_UFS1_MAGIC && 788 newfs->fs_magic != FS_UFS2_MAGIC) || 789 newfs->fs_bsize > MAXBSIZE || 790 newfs->fs_bsize < sizeof(struct fs)) { 791 brelse(bp); 792 return (EIO); /* XXX needs translation */ 793 } 794 /* 795 * Preserve the summary information, read-only status, and 796 * superblock location by copying these fields into our new 797 * superblock before using it to update the existing superblock. 798 */ 799 newfs->fs_si = fs->fs_si; 800 newfs->fs_ronly = fs->fs_ronly; 801 sblockloc = fs->fs_sblockloc; 802 bcopy(newfs, fs, (u_int)fs->fs_sbsize); 803 brelse(bp); 804 ump->um_bsize = fs->fs_bsize; 805 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 806 ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); 807 UFS_LOCK(ump); 808 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 809 printf("WARNING: %s: reload pending error: blocks %jd " 810 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 811 fs->fs_pendinginodes); 812 fs->fs_pendingblocks = 0; 813 fs->fs_pendinginodes = 0; 814 } 815 UFS_UNLOCK(ump); 816 817 /* 818 * Step 3: re-read summary information from disk. 819 */ 820 size = fs->fs_cssize; 821 blks = howmany(size, fs->fs_fsize); 822 if (fs->fs_contigsumsize > 0) 823 size += fs->fs_ncg * sizeof(int32_t); 824 size += fs->fs_ncg * sizeof(u_int8_t); 825 free(fs->fs_csp, M_UFSMNT); 826 space = malloc(size, M_UFSMNT, M_WAITOK); 827 fs->fs_csp = space; 828 for (i = 0; i < blks; i += fs->fs_frag) { 829 size = fs->fs_bsize; 830 if (i + fs->fs_frag > blks) 831 size = (blks - i) * fs->fs_fsize; 832 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 833 NOCRED, &bp); 834 if (error) 835 return (error); 836 bcopy(bp->b_data, space, (u_int)size); 837 space = (char *)space + size; 838 brelse(bp); 839 } 840 /* 841 * We no longer know anything about clusters per cylinder group. 842 */ 843 if (fs->fs_contigsumsize > 0) { 844 fs->fs_maxcluster = lp = space; 845 for (i = 0; i < fs->fs_ncg; i++) 846 *lp++ = fs->fs_contigsumsize; 847 space = lp; 848 } 849 size = fs->fs_ncg * sizeof(u_int8_t); 850 fs->fs_contigdirs = (u_int8_t *)space; 851 bzero(fs->fs_contigdirs, size); 852 if ((flags & FFSR_UNSUSPEND) != 0) { 853 MNT_ILOCK(mp); 854 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); 855 wakeup(&mp->mnt_flag); 856 MNT_IUNLOCK(mp); 857 } 858 859 loop: 860 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 861 /* 862 * Skip syncer vnode. 
863 */ 864 if (vp->v_type == VNON) { 865 VI_UNLOCK(vp); 866 continue; 867 } 868 /* 869 * Step 4: invalidate all cached file data. 870 */ 871 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) { 872 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 873 goto loop; 874 } 875 if (vinvalbuf(vp, 0, 0, 0)) 876 panic("ffs_reload: dirty2"); 877 /* 878 * Step 5: re-read inode data for all active vnodes. 879 */ 880 ip = VTOI(vp); 881 error = 882 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 883 (int)fs->fs_bsize, NOCRED, &bp); 884 if (error) { 885 vput(vp); 886 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 887 return (error); 888 } 889 if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) { 890 brelse(bp); 891 vput(vp); 892 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 893 return (error); 894 } 895 ip->i_effnlink = ip->i_nlink; 896 brelse(bp); 897 vput(vp); 898 } 899 return (0); 900 } 901 902 /* 903 * Common code for mount and mountroot 904 */ 905 static int 906 ffs_mountfs(odevvp, mp, td) 907 struct vnode *odevvp; 908 struct mount *mp; 909 struct thread *td; 910 { 911 struct ufsmount *ump; 912 struct fs *fs; 913 struct cdev *dev; 914 int error, i, len, ronly; 915 struct ucred *cred; 916 struct g_consumer *cp; 917 struct mount *nmp; 918 struct vnode *devvp; 919 int candelete, canspeedup; 920 off_t loc; 921 922 fs = NULL; 923 ump = NULL; 924 cred = td ? td->td_ucred : NOCRED; 925 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 926 927 devvp = mntfs_allocvp(mp, odevvp); 928 VOP_UNLOCK(odevvp); 929 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 930 KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); 931 dev = devvp->v_rdev; 932 KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data")); 933 if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, 934 (uintptr_t)mp) == 0) { 935 mntfs_freevp(devvp); 936 return (EBUSY); 937 } 938 g_topology_lock(); 939 error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1); 940 g_topology_unlock(); 941 if (error != 0) { 942 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 943 mntfs_freevp(devvp); 944 return (error); 945 } 946 dev_ref(dev); 947 devvp->v_bufobj.bo_ops = &ffs_ops; 948 BO_LOCK(&odevvp->v_bufobj); 949 odevvp->v_bufobj.bo_flag |= BO_NOBUFS; 950 BO_UNLOCK(&odevvp->v_bufobj); 951 VOP_UNLOCK(devvp); 952 if (dev->si_iosize_max != 0) 953 mp->mnt_iosize_max = dev->si_iosize_max; 954 if (mp->mnt_iosize_max > maxphys) 955 mp->mnt_iosize_max = maxphys; 956 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { 957 error = EINVAL; 958 vfs_mount_error(mp, 959 "Invalid sectorsize %d for superblock size %d", 960 cp->provider->sectorsize, SBLOCKSIZE); 961 goto out; 962 } 963 /* fetch the superblock and summary information */ 964 loc = STDSB; 965 if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0) 966 loc = STDSB_NOHASHFAIL; 967 if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0) 968 goto out; 969 fs->fs_flags &= ~FS_UNCLEAN; 970 if (fs->fs_clean == 0) { 971 fs->fs_flags |= FS_UNCLEAN; 972 if (ronly || (mp->mnt_flag & MNT_FORCE) || 973 ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && 974 (fs->fs_flags & FS_DOSOFTDEP))) { 975 printf("WARNING: %s was not properly dismounted\n", 976 fs->fs_fsmnt); 977 } else { 978 vfs_mount_error(mp, "R/W mount of %s denied. %s%s", 979 fs->fs_fsmnt, "Filesystem is not clean - run fsck.", 980 (fs->fs_flags & FS_SUJ) == 0 ? 
"" : 981 " Forced mount will invalidate journal contents"); 982 error = EPERM; 983 goto out; 984 } 985 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 986 (mp->mnt_flag & MNT_FORCE)) { 987 printf("WARNING: %s: lost blocks %jd files %d\n", 988 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 989 fs->fs_pendinginodes); 990 fs->fs_pendingblocks = 0; 991 fs->fs_pendinginodes = 0; 992 } 993 } 994 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 995 printf("WARNING: %s: mount pending error: blocks %jd " 996 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 997 fs->fs_pendinginodes); 998 fs->fs_pendingblocks = 0; 999 fs->fs_pendinginodes = 0; 1000 } 1001 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 1002 #ifdef UFS_GJOURNAL 1003 /* 1004 * Get journal provider name. 1005 */ 1006 len = 1024; 1007 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 1008 if (g_io_getattr("GJOURNAL::provider", cp, &len, 1009 mp->mnt_gjprovider) == 0) { 1010 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 1011 M_UFSMNT, M_WAITOK); 1012 MNT_ILOCK(mp); 1013 mp->mnt_flag |= MNT_GJOURNAL; 1014 MNT_IUNLOCK(mp); 1015 } else { 1016 if ((mp->mnt_flag & MNT_RDONLY) == 0) 1017 printf("WARNING: %s: GJOURNAL flag on fs " 1018 "but no gjournal provider below\n", 1019 mp->mnt_stat.f_mntonname); 1020 free(mp->mnt_gjprovider, M_UFSMNT); 1021 mp->mnt_gjprovider = NULL; 1022 } 1023 #else 1024 printf("WARNING: %s: GJOURNAL flag on fs but no " 1025 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 1026 #endif 1027 } else { 1028 mp->mnt_gjprovider = NULL; 1029 } 1030 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 1031 ump->um_cp = cp; 1032 ump->um_bo = &devvp->v_bufobj; 1033 ump->um_fs = fs; 1034 if (fs->fs_magic == FS_UFS1_MAGIC) { 1035 ump->um_fstype = UFS1; 1036 ump->um_balloc = ffs_balloc_ufs1; 1037 } else { 1038 ump->um_fstype = UFS2; 1039 ump->um_balloc = ffs_balloc_ufs2; 1040 } 1041 ump->um_blkatoff = ffs_blkatoff; 1042 ump->um_truncate = ffs_truncate; 1043 ump->um_update = ffs_update; 1044 ump->um_valloc = ffs_valloc; 1045 ump->um_vfree = ffs_vfree; 1046 ump->um_ifree = ffs_ifree; 1047 ump->um_rdonly = ffs_rdonly; 1048 ump->um_snapgone = ffs_snapgone; 1049 if ((mp->mnt_flag & MNT_UNTRUSTED) != 0) 1050 ump->um_check_blkno = ffs_check_blkno; 1051 else 1052 ump->um_check_blkno = NULL; 1053 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 1054 sx_init(&ump->um_checkpath_lock, "uchpth"); 1055 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); 1056 fs->fs_ronly = ronly; 1057 fs->fs_active = NULL; 1058 mp->mnt_data = ump; 1059 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 1060 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 1061 nmp = NULL; 1062 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 1063 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 1064 if (nmp) 1065 vfs_rel(nmp); 1066 vfs_getnewfsid(mp); 1067 } 1068 ump->um_bsize = fs->fs_bsize; 1069 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 1070 MNT_ILOCK(mp); 1071 mp->mnt_flag |= MNT_LOCAL; 1072 MNT_IUNLOCK(mp); 1073 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 1074 #ifdef MAC 1075 MNT_ILOCK(mp); 1076 mp->mnt_flag |= MNT_MULTILABEL; 1077 MNT_IUNLOCK(mp); 1078 #else 1079 printf("WARNING: %s: multilabel flag on fs but " 1080 "no MAC support\n", mp->mnt_stat.f_mntonname); 1081 #endif 1082 } 1083 if ((fs->fs_flags & FS_ACLS) != 0) { 1084 #ifdef UFS_ACL 1085 MNT_ILOCK(mp); 1086 1087 if (mp->mnt_flag & MNT_NFS4ACLS) 1088 printf("WARNING: %s: ACLs flag on fs conflicts with " 1089 "\"nfsv4acls\" mount option; option ignored\n", 1090 
mp->mnt_stat.f_mntonname); 1091 mp->mnt_flag &= ~MNT_NFS4ACLS; 1092 mp->mnt_flag |= MNT_ACLS; 1093 1094 MNT_IUNLOCK(mp); 1095 #else 1096 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 1097 mp->mnt_stat.f_mntonname); 1098 #endif 1099 } 1100 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 1101 #ifdef UFS_ACL 1102 MNT_ILOCK(mp); 1103 1104 if (mp->mnt_flag & MNT_ACLS) 1105 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 1106 "with \"acls\" mount option; option ignored\n", 1107 mp->mnt_stat.f_mntonname); 1108 mp->mnt_flag &= ~MNT_ACLS; 1109 mp->mnt_flag |= MNT_NFS4ACLS; 1110 1111 MNT_IUNLOCK(mp); 1112 #else 1113 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 1114 "ACLs support\n", mp->mnt_stat.f_mntonname); 1115 #endif 1116 } 1117 if ((fs->fs_flags & FS_TRIM) != 0) { 1118 len = sizeof(int); 1119 if (g_io_getattr("GEOM::candelete", cp, &len, 1120 &candelete) == 0) { 1121 if (candelete) 1122 ump->um_flags |= UM_CANDELETE; 1123 else 1124 printf("WARNING: %s: TRIM flag on fs but disk " 1125 "does not support TRIM\n", 1126 mp->mnt_stat.f_mntonname); 1127 } else { 1128 printf("WARNING: %s: TRIM flag on fs but disk does " 1129 "not confirm that it supports TRIM\n", 1130 mp->mnt_stat.f_mntonname); 1131 } 1132 if (((ump->um_flags) & UM_CANDELETE) != 0) { 1133 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 1134 taskqueue_thread_enqueue, &ump->um_trim_tq); 1135 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 1136 "%s trim", mp->mnt_stat.f_mntonname); 1137 ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM, 1138 &ump->um_trimlisthashsize); 1139 } 1140 } 1141 1142 len = sizeof(int); 1143 if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) { 1144 if (canspeedup) 1145 ump->um_flags |= UM_CANSPEEDUP; 1146 } 1147 1148 ump->um_mountp = mp; 1149 ump->um_dev = dev; 1150 ump->um_devvp = devvp; 1151 ump->um_odevvp = odevvp; 1152 ump->um_nindir = fs->fs_nindir; 1153 ump->um_bptrtodb = fs->fs_fsbtodb; 1154 ump->um_seqinc = fs->fs_frag; 1155 for (i = 0; i < MAXQUOTAS; i++) 1156 ump->um_quotas[i] = NULLVP; 1157 #ifdef UFS_EXTATTR 1158 ufs_extattr_uepm_init(&ump->um_extattr); 1159 #endif 1160 /* 1161 * Set FS local "last mounted on" information (NULL pad) 1162 */ 1163 bzero(fs->fs_fsmnt, MAXMNTLEN); 1164 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1165 mp->mnt_stat.f_iosize = fs->fs_bsize; 1166 1167 if (mp->mnt_flag & MNT_ROOTFS) { 1168 /* 1169 * Root mount; update timestamp in mount structure. 1170 * this will be used by the common root mount code 1171 * to update the system clock. 1172 */ 1173 mp->mnt_time = fs->fs_time; 1174 } 1175 1176 if (ronly == 0) { 1177 fs->fs_mtime = time_second; 1178 if ((fs->fs_flags & FS_DOSOFTDEP) && 1179 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1180 ffs_flushfiles(mp, FORCECLOSE, td); 1181 goto out; 1182 } 1183 if (fs->fs_snapinum[0] != 0) 1184 ffs_snapshot_mount(mp); 1185 fs->fs_fmod = 1; 1186 fs->fs_clean = 0; 1187 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1188 } 1189 /* 1190 * Initialize filesystem state information in mount struct. 1191 */ 1192 MNT_ILOCK(mp); 1193 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1194 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1195 MNT_IUNLOCK(mp); 1196 #ifdef UFS_EXTATTR 1197 #ifdef UFS_EXTATTR_AUTOSTART 1198 /* 1199 * 1200 * Auto-starting does the following: 1201 * - check for /.attribute in the fs, and extattr_start if so 1202 * - for each file in .attribute, enable that file with 1203 * an attribute of the same name. 
1204 * Not clear how to report errors -- probably eat them. 1205 * This would all happen while the filesystem was busy/not 1206 * available, so would effectively be "atomic". 1207 */ 1208 (void) ufs_extattr_autostart(mp, td); 1209 #endif /* !UFS_EXTATTR_AUTOSTART */ 1210 #endif /* !UFS_EXTATTR */ 1211 return (0); 1212 out: 1213 if (fs != NULL) { 1214 free(fs->fs_csp, M_UFSMNT); 1215 free(fs->fs_si, M_UFSMNT); 1216 free(fs, M_UFSMNT); 1217 } 1218 if (cp != NULL) { 1219 g_topology_lock(); 1220 g_vfs_close(cp); 1221 g_topology_unlock(); 1222 } 1223 if (ump != NULL) { 1224 mtx_destroy(UFS_MTX(ump)); 1225 sx_destroy(&ump->um_checkpath_lock); 1226 if (mp->mnt_gjprovider != NULL) { 1227 free(mp->mnt_gjprovider, M_UFSMNT); 1228 mp->mnt_gjprovider = NULL; 1229 } 1230 MPASS(ump->um_softdep == NULL); 1231 free(ump, M_UFSMNT); 1232 mp->mnt_data = NULL; 1233 } 1234 BO_LOCK(&odevvp->v_bufobj); 1235 odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1236 BO_UNLOCK(&odevvp->v_bufobj); 1237 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 1238 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1239 mntfs_freevp(devvp); 1240 dev_rel(dev); 1241 return (error); 1242 } 1243 1244 /* 1245 * A read function for use by filesystem-layer routines. 1246 */ 1247 static int 1248 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size) 1249 { 1250 struct buf *bp; 1251 int error; 1252 1253 KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp)); 1254 *bufp = malloc(size, M_UFSMNT, M_WAITOK); 1255 if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED, 1256 &bp)) != 0) 1257 return (error); 1258 bcopy(bp->b_data, *bufp, size); 1259 bp->b_flags |= B_INVAL | B_NOCACHE; 1260 brelse(bp); 1261 return (0); 1262 } 1263 1264 static int bigcgs = 0; 1265 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); 1266 1267 /* 1268 * Sanity checks for loading old filesystem superblocks. 1269 * See ffs_oldfscompat_write below for unwound actions. 1270 * 1271 * XXX - Parts get retired eventually. 1272 * Unfortunately new bits get added. 1273 */ 1274 static void 1275 ffs_oldfscompat_read(fs, ump, sblockloc) 1276 struct fs *fs; 1277 struct ufsmount *ump; 1278 ufs2_daddr_t sblockloc; 1279 { 1280 off_t maxfilesize; 1281 1282 /* 1283 * If not yet done, update fs_flags location and value of fs_sblockloc. 1284 */ 1285 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1286 fs->fs_flags = fs->fs_old_flags; 1287 fs->fs_old_flags |= FS_FLAGS_UPDATED; 1288 fs->fs_sblockloc = sblockloc; 1289 } 1290 /* 1291 * If not yet done, update UFS1 superblock with new wider fields. 
1292 */ 1293 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { 1294 fs->fs_maxbsize = fs->fs_bsize; 1295 fs->fs_time = fs->fs_old_time; 1296 fs->fs_size = fs->fs_old_size; 1297 fs->fs_dsize = fs->fs_old_dsize; 1298 fs->fs_csaddr = fs->fs_old_csaddr; 1299 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; 1300 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; 1301 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; 1302 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; 1303 } 1304 if (fs->fs_magic == FS_UFS1_MAGIC && 1305 fs->fs_old_inodefmt < FS_44INODEFMT) { 1306 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; 1307 fs->fs_qbmask = ~fs->fs_bmask; 1308 fs->fs_qfmask = ~fs->fs_fmask; 1309 } 1310 if (fs->fs_magic == FS_UFS1_MAGIC) { 1311 ump->um_savedmaxfilesize = fs->fs_maxfilesize; 1312 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; 1313 if (fs->fs_maxfilesize > maxfilesize) 1314 fs->fs_maxfilesize = maxfilesize; 1315 } 1316 /* Compatibility for old filesystems */ 1317 if (fs->fs_avgfilesize <= 0) 1318 fs->fs_avgfilesize = AVFILESIZ; 1319 if (fs->fs_avgfpdir <= 0) 1320 fs->fs_avgfpdir = AFPDIR; 1321 if (bigcgs) { 1322 fs->fs_save_cgsize = fs->fs_cgsize; 1323 fs->fs_cgsize = fs->fs_bsize; 1324 } 1325 } 1326 1327 /* 1328 * Unwinding superblock updates for old filesystems. 1329 * See ffs_oldfscompat_read above for details. 1330 * 1331 * XXX - Parts get retired eventually. 1332 * Unfortunately new bits get added. 1333 */ 1334 void 1335 ffs_oldfscompat_write(fs, ump) 1336 struct fs *fs; 1337 struct ufsmount *ump; 1338 { 1339 1340 /* 1341 * Copy back UFS2 updated fields that UFS1 inspects. 1342 */ 1343 if (fs->fs_magic == FS_UFS1_MAGIC) { 1344 fs->fs_old_time = fs->fs_time; 1345 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; 1346 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; 1347 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; 1348 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; 1349 fs->fs_maxfilesize = ump->um_savedmaxfilesize; 1350 } 1351 if (bigcgs) { 1352 fs->fs_cgsize = fs->fs_save_cgsize; 1353 fs->fs_save_cgsize = 0; 1354 } 1355 } 1356 1357 /* 1358 * unmount system call 1359 */ 1360 static int 1361 ffs_unmount(mp, mntflags) 1362 struct mount *mp; 1363 int mntflags; 1364 { 1365 struct thread *td; 1366 struct ufsmount *ump = VFSTOUFS(mp); 1367 struct fs *fs; 1368 int error, flags, susp; 1369 #ifdef UFS_EXTATTR 1370 int e_restart; 1371 #endif 1372 1373 flags = 0; 1374 td = curthread; 1375 fs = ump->um_fs; 1376 if (mntflags & MNT_FORCE) 1377 flags |= FORCECLOSE; 1378 susp = fs->fs_ronly == 0; 1379 #ifdef UFS_EXTATTR 1380 if ((error = ufs_extattr_stop(mp, td))) { 1381 if (error != EOPNOTSUPP) 1382 printf("WARNING: unmount %s: ufs_extattr_stop " 1383 "returned errno %d\n", mp->mnt_stat.f_mntonname, 1384 error); 1385 e_restart = 0; 1386 } else { 1387 ufs_extattr_uepm_destroy(&ump->um_extattr); 1388 e_restart = 1; 1389 } 1390 #endif 1391 if (susp) { 1392 error = vfs_write_suspend_umnt(mp); 1393 if (error != 0) 1394 goto fail1; 1395 } 1396 if (MOUNTEDSOFTDEP(mp)) 1397 error = softdep_flushfiles(mp, flags, td); 1398 else 1399 error = ffs_flushfiles(mp, flags, td); 1400 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) 1401 goto fail; 1402 1403 UFS_LOCK(ump); 1404 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1405 printf("WARNING: unmount %s: pending error: blocks %jd " 1406 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1407 fs->fs_pendinginodes); 1408 fs->fs_pendingblocks = 0; 1409 
fs->fs_pendinginodes = 0; 1410 } 1411 UFS_UNLOCK(ump); 1412 if (MOUNTEDSOFTDEP(mp)) 1413 softdep_unmount(mp); 1414 MPASS(ump->um_softdep == NULL); 1415 if (fs->fs_ronly == 0) { 1416 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; 1417 error = ffs_sbupdate(ump, MNT_WAIT, 0); 1418 if (ffs_fsfail_cleanup(ump, error)) 1419 error = 0; 1420 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) { 1421 fs->fs_clean = 0; 1422 goto fail; 1423 } 1424 } 1425 if (susp) 1426 vfs_write_resume(mp, VR_START_WRITE); 1427 if (ump->um_trim_tq != NULL) { 1428 MPASS(ump->um_trim_inflight == 0); 1429 taskqueue_free(ump->um_trim_tq); 1430 free (ump->um_trimhash, M_TRIM); 1431 } 1432 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1433 g_topology_lock(); 1434 g_vfs_close(ump->um_cp); 1435 g_topology_unlock(); 1436 BO_LOCK(&ump->um_odevvp->v_bufobj); 1437 ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1438 BO_UNLOCK(&ump->um_odevvp->v_bufobj); 1439 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1440 mntfs_freevp(ump->um_devvp); 1441 vrele(ump->um_odevvp); 1442 dev_rel(ump->um_dev); 1443 mtx_destroy(UFS_MTX(ump)); 1444 sx_destroy(&ump->um_checkpath_lock); 1445 if (mp->mnt_gjprovider != NULL) { 1446 free(mp->mnt_gjprovider, M_UFSMNT); 1447 mp->mnt_gjprovider = NULL; 1448 } 1449 free(fs->fs_csp, M_UFSMNT); 1450 free(fs->fs_si, M_UFSMNT); 1451 free(fs, M_UFSMNT); 1452 free(ump, M_UFSMNT); 1453 mp->mnt_data = NULL; 1454 MNT_ILOCK(mp); 1455 mp->mnt_flag &= ~MNT_LOCAL; 1456 MNT_IUNLOCK(mp); 1457 if (td->td_su == mp) { 1458 td->td_su = NULL; 1459 vfs_rel(mp); 1460 } 1461 return (error); 1462 1463 fail: 1464 if (susp) 1465 vfs_write_resume(mp, VR_START_WRITE); 1466 fail1: 1467 #ifdef UFS_EXTATTR 1468 if (e_restart) { 1469 ufs_extattr_uepm_init(&ump->um_extattr); 1470 #ifdef UFS_EXTATTR_AUTOSTART 1471 (void) ufs_extattr_autostart(mp, td); 1472 #endif 1473 } 1474 #endif 1475 1476 return (error); 1477 } 1478 1479 /* 1480 * Flush out all the files in a filesystem. 1481 */ 1482 int 1483 ffs_flushfiles(mp, flags, td) 1484 struct mount *mp; 1485 int flags; 1486 struct thread *td; 1487 { 1488 struct ufsmount *ump; 1489 int qerror, error; 1490 1491 ump = VFSTOUFS(mp); 1492 qerror = 0; 1493 #ifdef QUOTA 1494 if (mp->mnt_flag & MNT_QUOTA) { 1495 int i; 1496 error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1497 if (error) 1498 return (error); 1499 for (i = 0; i < MAXQUOTAS; i++) { 1500 error = quotaoff(td, mp, i); 1501 if (error != 0) { 1502 if ((flags & EARLYFLUSH) == 0) 1503 return (error); 1504 else 1505 qerror = error; 1506 } 1507 } 1508 1509 /* 1510 * Here we fall through to vflush again to ensure that 1511 * we have gotten rid of all the system vnodes, unless 1512 * quotas must not be closed. 1513 */ 1514 } 1515 #endif 1516 /* devvp is not locked there */ 1517 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1518 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1519 return (error); 1520 ffs_snapshot_unmount(mp); 1521 flags |= FORCECLOSE; 1522 /* 1523 * Here we fall through to vflush again to ensure 1524 * that we have gotten rid of all the system vnodes. 1525 */ 1526 } 1527 1528 /* 1529 * Do not close system files if quotas were not closed, to be 1530 * able to sync the remaining dquots. The freeblks softupdate 1531 * workitems might hold a reference on a dquot, preventing 1532 * quotaoff() from completing. Next round of 1533 * softdep_flushworklist() iteration should process the 1534 * blockers, allowing the next run of quotaoff() to finally 1535 * flush held dquots. 
1536 * 1537 * Otherwise, flush all the files. 1538 */ 1539 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1540 return (error); 1541 1542 /* 1543 * If this is a forcible unmount and there were any files that 1544 * were unlinked but still open, then vflush() will have 1545 * truncated and freed those files, which might have started 1546 * some trim work. Wait here for any trims to complete 1547 * and process the blkfrees which follow the trims. 1548 * This may create more dirty devvp buffers and softdep deps. 1549 */ 1550 if (ump->um_trim_tq != NULL) { 1551 while (ump->um_trim_inflight != 0) 1552 pause("ufsutr", hz); 1553 taskqueue_drain_all(ump->um_trim_tq); 1554 } 1555 1556 /* 1557 * Flush filesystem metadata. 1558 */ 1559 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1560 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1561 VOP_UNLOCK(ump->um_devvp); 1562 return (error); 1563 } 1564 1565 /* 1566 * Get filesystem statistics. 1567 */ 1568 static int 1569 ffs_statfs(mp, sbp) 1570 struct mount *mp; 1571 struct statfs *sbp; 1572 { 1573 struct ufsmount *ump; 1574 struct fs *fs; 1575 1576 ump = VFSTOUFS(mp); 1577 fs = ump->um_fs; 1578 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1579 panic("ffs_statfs"); 1580 sbp->f_version = STATFS_VERSION; 1581 sbp->f_bsize = fs->fs_fsize; 1582 sbp->f_iosize = fs->fs_bsize; 1583 sbp->f_blocks = fs->fs_dsize; 1584 UFS_LOCK(ump); 1585 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 1586 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 1587 sbp->f_bavail = freespace(fs, fs->fs_minfree) + 1588 dbtofsb(fs, fs->fs_pendingblocks); 1589 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; 1590 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 1591 UFS_UNLOCK(ump); 1592 sbp->f_namemax = UFS_MAXNAMLEN; 1593 return (0); 1594 } 1595 1596 static bool 1597 sync_doupdate(struct inode *ip) 1598 { 1599 1600 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 1601 IN_UPDATE)) != 0); 1602 } 1603 1604 static int 1605 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused) 1606 { 1607 struct inode *ip; 1608 1609 /* 1610 * Flags are safe to access because ->v_data invalidation 1611 * is held off by listmtx. 1612 */ 1613 if (vp->v_type == VNON) 1614 return (false); 1615 ip = VTOI(vp); 1616 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) 1617 return (false); 1618 return (true); 1619 } 1620 1621 /* 1622 * For a lazy sync, we only care about access times, quotas and the 1623 * superblock. Other filesystem changes are already converted to 1624 * cylinder group blocks or inode blocks updates and are written to 1625 * disk by syncer. 1626 */ 1627 static int 1628 ffs_sync_lazy(mp) 1629 struct mount *mp; 1630 { 1631 struct vnode *mvp, *vp; 1632 struct inode *ip; 1633 int allerror, error; 1634 1635 allerror = 0; 1636 if ((mp->mnt_flag & MNT_NOATIME) != 0) { 1637 #ifdef QUOTA 1638 qsync(mp); 1639 #endif 1640 goto sbupdate; 1641 } 1642 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) { 1643 if (vp->v_type == VNON) { 1644 VI_UNLOCK(vp); 1645 continue; 1646 } 1647 ip = VTOI(vp); 1648 1649 /* 1650 * The IN_ACCESS flag is converted to IN_MODIFIED by 1651 * ufs_close() and ufs_getattr() by the calls to 1652 * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1653 * Test also all the other timestamp flags too, to pick up 1654 * any other cases that could be missed. 
1655 */ 1656 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1657 VI_UNLOCK(vp); 1658 continue; 1659 } 1660 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0) 1661 continue; 1662 #ifdef QUOTA 1663 qsyncvp(vp); 1664 #endif 1665 if (sync_doupdate(ip)) 1666 error = ffs_update(vp, 0); 1667 if (error != 0) 1668 allerror = error; 1669 vput(vp); 1670 } 1671 sbupdate: 1672 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1673 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1674 allerror = error; 1675 return (allerror); 1676 } 1677 1678 /* 1679 * Go through the disk queues to initiate sandbagged IO; 1680 * go through the inodes to write those that have been modified; 1681 * initiate the writing of the super block if it has been modified. 1682 * 1683 * Note: we are always called with the filesystem marked busy using 1684 * vfs_busy(). 1685 */ 1686 static int 1687 ffs_sync(mp, waitfor) 1688 struct mount *mp; 1689 int waitfor; 1690 { 1691 struct vnode *mvp, *vp, *devvp; 1692 struct thread *td; 1693 struct inode *ip; 1694 struct ufsmount *ump = VFSTOUFS(mp); 1695 struct fs *fs; 1696 int error, count, lockreq, allerror = 0; 1697 int suspend; 1698 int suspended; 1699 int secondary_writes; 1700 int secondary_accwrites; 1701 int softdep_deps; 1702 int softdep_accdeps; 1703 struct bufobj *bo; 1704 1705 suspend = 0; 1706 suspended = 0; 1707 td = curthread; 1708 fs = ump->um_fs; 1709 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) 1710 panic("%s: ffs_sync: modification on read-only filesystem", 1711 fs->fs_fsmnt); 1712 if (waitfor == MNT_LAZY) { 1713 if (!rebooting) 1714 return (ffs_sync_lazy(mp)); 1715 waitfor = MNT_NOWAIT; 1716 } 1717 1718 /* 1719 * Write back each (modified) inode. 1720 */ 1721 lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1722 if (waitfor == MNT_SUSPEND) { 1723 suspend = 1; 1724 waitfor = MNT_WAIT; 1725 } 1726 if (waitfor == MNT_WAIT) 1727 lockreq = LK_EXCLUSIVE; 1728 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; 1729 loop: 1730 /* Grab snapshot of secondary write counts */ 1731 MNT_ILOCK(mp); 1732 secondary_writes = mp->mnt_secondary_writes; 1733 secondary_accwrites = mp->mnt_secondary_accwrites; 1734 MNT_IUNLOCK(mp); 1735 1736 /* Grab snapshot of softdep dependency counts */ 1737 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1738 1739 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1740 /* 1741 * Depend on the vnode interlock to keep things stable enough 1742 * for a quick test. Since there might be hundreds of 1743 * thousands of vnodes, we cannot afford even a subroutine 1744 * call unless there's a good chance that we have work to do. 1745 */ 1746 if (vp->v_type == VNON) { 1747 VI_UNLOCK(vp); 1748 continue; 1749 } 1750 ip = VTOI(vp); 1751 if ((ip->i_flag & 1752 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 1753 vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1754 VI_UNLOCK(vp); 1755 continue; 1756 } 1757 if ((error = vget(vp, lockreq)) != 0) { 1758 if (error == ENOENT || error == ENOLCK) { 1759 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1760 goto loop; 1761 } 1762 continue; 1763 } 1764 #ifdef QUOTA 1765 qsyncvp(vp); 1766 #endif 1767 for (;;) { 1768 error = ffs_syncvnode(vp, waitfor, 0); 1769 if (error == ERELOOKUP) 1770 continue; 1771 if (error != 0) 1772 allerror = error; 1773 break; 1774 } 1775 vput(vp); 1776 } 1777 /* 1778 * Force stale filesystem control information to be flushed. 
1779 */ 1780 if (waitfor == MNT_WAIT || rebooting) { 1781 if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 1782 allerror = error; 1783 if (ffs_fsfail_cleanup(ump, allerror)) 1784 allerror = 0; 1785 /* Flushed work items may create new vnodes to clean */ 1786 if (allerror == 0 && count) 1787 goto loop; 1788 } 1789 1790 devvp = ump->um_devvp; 1791 bo = &devvp->v_bufobj; 1792 BO_LOCK(bo); 1793 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1794 BO_UNLOCK(bo); 1795 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1796 error = VOP_FSYNC(devvp, waitfor, td); 1797 VOP_UNLOCK(devvp); 1798 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 1799 error = ffs_sbupdate(ump, waitfor, 0); 1800 if (error != 0) 1801 allerror = error; 1802 if (ffs_fsfail_cleanup(ump, allerror)) 1803 allerror = 0; 1804 if (allerror == 0 && waitfor == MNT_WAIT) 1805 goto loop; 1806 } else if (suspend != 0) { 1807 if (softdep_check_suspend(mp, 1808 devvp, 1809 softdep_deps, 1810 softdep_accdeps, 1811 secondary_writes, 1812 secondary_accwrites) != 0) { 1813 MNT_IUNLOCK(mp); 1814 goto loop; /* More work needed */ 1815 } 1816 mtx_assert(MNT_MTX(mp), MA_OWNED); 1817 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1818 MNT_IUNLOCK(mp); 1819 suspended = 1; 1820 } else 1821 BO_UNLOCK(bo); 1822 /* 1823 * Write back modified superblock. 1824 */ 1825 if (fs->fs_fmod != 0 && 1826 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1827 allerror = error; 1828 if (ffs_fsfail_cleanup(ump, allerror)) 1829 allerror = 0; 1830 return (allerror); 1831 } 1832 1833 int 1834 ffs_vget(mp, ino, flags, vpp) 1835 struct mount *mp; 1836 ino_t ino; 1837 int flags; 1838 struct vnode **vpp; 1839 { 1840 return (ffs_vgetf(mp, ino, flags, vpp, 0)); 1841 } 1842 1843 int 1844 ffs_vgetf(mp, ino, flags, vpp, ffs_flags) 1845 struct mount *mp; 1846 ino_t ino; 1847 int flags; 1848 struct vnode **vpp; 1849 int ffs_flags; 1850 { 1851 struct fs *fs; 1852 struct inode *ip; 1853 struct ufsmount *ump; 1854 struct buf *bp; 1855 struct vnode *vp; 1856 daddr_t dbn; 1857 int error; 1858 1859 MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 || 1860 (flags & LK_EXCLUSIVE) != 0); 1861 1862 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 1863 if (error != 0) 1864 return (error); 1865 if (*vpp != NULL) { 1866 if ((ffs_flags & FFSV_REPLACE) == 0 || 1867 ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 || 1868 !VN_IS_DOOMED(*vpp))) 1869 return (0); 1870 vgone(*vpp); 1871 vput(*vpp); 1872 } 1873 1874 /* 1875 * We must promote to an exclusive lock for vnode creation. This 1876 * can happen if lookup is passed LOCKSHARED. 1877 */ 1878 if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1879 flags &= ~LK_TYPE_MASK; 1880 flags |= LK_EXCLUSIVE; 1881 } 1882 1883 /* 1884 * We do not lock vnode creation as it is believed to be too 1885 * expensive for such rare case as simultaneous creation of vnode 1886 * for same ino by different processes. We just allow them to race 1887 * and check later to decide who wins. Let the race begin! 1888 */ 1889 1890 ump = VFSTOUFS(mp); 1891 fs = ump->um_fs; 1892 ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO); 1893 1894 /* Allocate a new vnode/inode. */ 1895 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 1896 &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1897 if (error) { 1898 *vpp = NULL; 1899 uma_zfree_smr(uma_inode, ip); 1900 return (error); 1901 } 1902 /* 1903 * FFS supports recursive locking. 
1904 */ 1905 lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL); 1906 VN_LOCK_AREC(vp); 1907 vp->v_data = ip; 1908 vp->v_bufobj.bo_bsize = fs->fs_bsize; 1909 ip->i_vnode = vp; 1910 ip->i_ump = ump; 1911 ip->i_number = ino; 1912 ip->i_ea_refs = 0; 1913 ip->i_nextclustercg = -1; 1914 ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; 1915 ip->i_mode = 0; /* ensure error cases below throw away vnode */ 1916 cluster_init_vn(&ip->i_clusterw); 1917 #ifdef DIAGNOSTIC 1918 ufs_init_trackers(ip); 1919 #endif 1920 #ifdef QUOTA 1921 { 1922 int i; 1923 for (i = 0; i < MAXQUOTAS; i++) 1924 ip->i_dquot[i] = NODQUOT; 1925 } 1926 #endif 1927 1928 if (ffs_flags & FFSV_FORCEINSMQ) 1929 vp->v_vflag |= VV_FORCEINSMQ; 1930 error = insmntque(vp, mp); 1931 if (error != 0) { 1932 uma_zfree_smr(uma_inode, ip); 1933 *vpp = NULL; 1934 return (error); 1935 } 1936 vp->v_vflag &= ~VV_FORCEINSMQ; 1937 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 1938 if (error != 0) 1939 return (error); 1940 if (*vpp != NULL) { 1941 /* 1942 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set) 1943 * operate on empty inode, which must not be found by 1944 * other threads until fully filled. Vnode for empty 1945 * inode must be not re-inserted on the hash by other 1946 * thread, after removal by us at the beginning. 1947 */ 1948 MPASS((ffs_flags & FFSV_REPLACE) == 0); 1949 return (0); 1950 } 1951 1952 /* Read in the disk contents for the inode, copy into the inode. */ 1953 dbn = fsbtodb(fs, ino_to_fsba(fs, ino)); 1954 error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize, 1955 NULL, NULL, 0, NOCRED, 0, NULL, &bp); 1956 if (error != 0) { 1957 /* 1958 * The inode does not contain anything useful, so it would 1959 * be misleading to leave it on its hash chain. With mode 1960 * still zero, it will be unlinked and returned to the free 1961 * list by vput(). 1962 */ 1963 vgone(vp); 1964 vput(vp); 1965 *vpp = NULL; 1966 return (error); 1967 } 1968 if (I_IS_UFS1(ip)) 1969 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 1970 else 1971 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1972 if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) { 1973 bqrelse(bp); 1974 vgone(vp); 1975 vput(vp); 1976 *vpp = NULL; 1977 return (error); 1978 } 1979 if (DOINGSOFTDEP(vp) && (!fs->fs_ronly || 1980 (ffs_flags & FFSV_FORCEINODEDEP) != 0)) 1981 softdep_load_inodeblock(ip); 1982 else 1983 ip->i_effnlink = ip->i_nlink; 1984 bqrelse(bp); 1985 1986 /* 1987 * Initialize the vnode from the inode, check for aliases. 1988 * Note that the underlying vnode may have changed. 1989 */ 1990 error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, 1991 &vp); 1992 if (error) { 1993 vgone(vp); 1994 vput(vp); 1995 *vpp = NULL; 1996 return (error); 1997 } 1998 1999 /* 2000 * Finish inode initialization. 2001 */ 2002 if (vp->v_type != VFIFO) { 2003 /* FFS supports shared locking for all files except fifos. */ 2004 VN_LOCK_ASHARE(vp); 2005 } 2006 2007 /* 2008 * Set up a generation number for this inode if it does not 2009 * already have one. This should only happen on old filesystems. 
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one.  This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		while (ip->i_gen == 0)
			ip->i_gen = arc4random();
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
			DIP_SET(ip, i_gen, ip->i_gen);
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vgone(vp);
			vput(vp);
			*vpp = NULL;
			return (error);
		}
	}
#endif

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{
	struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
	    vpp, 0));
}

int
ffs_inotovp(mp, ino, gen, lflags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	u_int64_t gen;
	int lflags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct ufsmount *ump;
	struct vnode *nvp;
	struct inode *ip;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	u_int cg;
	int error;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	*vpp = NULL;

	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);

	/*
	 * Need to check if the inode is initialized because UFS2 does
	 * lazy initialization and nfs_fhtovp can offer arbitrary inode
	 * numbers.
	 */
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		cg = ino_to_cg(fs, ino);
		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
		if (error != 0)
			return (error);
		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
			brelse(bp);
			return (ESTALE);
		}
		brelse(bp);
	}

	error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
	if (error != 0)
		return (error);

	ip = VTOI(nvp);
	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
		if (ip->i_mode == 0)
			vgone(nvp);
		vput(nvp);
		return (ESTALE);
	}

	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
	*vpp = nvp;
	return (0);
}

/*
 * Initialize the filesystem.
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(vfsp)
	struct vfsconf *vfsp;
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	taskqueue_drain_all(taskqueue_thread);
	return (ret);
}

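/*
 * The superblock write path below works in two stages: ffs_sbupdate()
 * packages the buffer and wait/suspend disposition into a struct devfd
 * and hands it to ffs_sbput(), which calls back into ffs_use_bwrite()
 * for the summary information blocks and for the superblock itself.
 */
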
/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount *ump;
	struct buf *sbbp;
	int waitfor;
	int suspended;
	int error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(ump, waitfor, suspended)
	struct ufsmount *ump;
	int waitfor;
	int suspended;
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE))
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}

/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself.  We need to do special checks for it.
	 */
	bp = devfdp->sbbp;
	if (ffs_fsfail_cleanup(ump, devfdp->error))
		devfdp->error = 0;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	UFS_LOCK(ump);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	UFS_UNLOCK(ump);
	fs = (struct fs *)bp->b_data;
	fs->fs_fmod = 0;
	ffs_oldfscompat_write(fs, ump);
	fs->fs_si = NULL;
	/* Recalculate the superblock hash */
	fs->fs_ckhash = ffs_calc_sbhash(fs);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree_smr(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
		softdep_handle_error(bp);
#endif

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * Mark the cylinder group buffer origbp as dirty so that the
	 * failed write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies, then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed.  Clear B_IOSTARTED in case of error.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirtying bp on
	 * errors.  It causes a b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated, which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous.  Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory.  If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies.  If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original.  The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes.  We have to recalculate
		 * its check hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}

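/*
 * On writes, ffs_geom_strategy() below triggers snapshot copy-on-write,
 * starts any soft dependency work recorded on the buffers, and recomputes
 * the check hash on cylinder group blocks.  Non-read buffers are marked
 * for ENXIO conversion when ffs_enxio_enable is set, and the I/O is then
 * handed to GEOM via g_vfs_strategy().
 */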
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	/*
	 * This is the bufobj strategy for the private VCHR vnodes
	 * used by FFS to access the underlying storage device.
	 * We override the default bufobj strategy and thus bypass
	 * VOP_STRATEGY() for these vnodes.
	 */
	vp = bo2vnode(bo);
	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
	    bp->b_vp->v_rdev == NULL ||
	    bp->b_vp->v_rdev->si_mountpt == NULL ||
	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
	    ("ffs_geom_strategy() with wrong vp"));
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bp->b_flags &= ~B_BARRIER;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags &= ~B_BARRIER;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}

#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (u_int)(bp->b_xflags & BX_FSPRIV),
			    PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
		bp->b_xflags |= BX_CVTENXIO;
	g_vfs_strategy(bo, bp);
}

int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif /* SOFTUPDATES */
#endif /* DDB */