/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;
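
/*
 * VFS operations vector for FFS.  Entry points left unset here are
 * filled in with the generic VFS defaults when the filesystem is
 * registered by VFS_SET() below.
 */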
static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}
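
/*
 * ffs_blkatoff() is not called by name outside this file; ffs_mountfs()
 * below installs it as ump->um_blkatoff, which is how the
 * filesystem-independent UFS code reaches it (through the
 * UFS_BLKATOFF() macro in <ufs/ufs/ufsmount.h>).
 */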

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block.  Also check
	 * that it does not point to an inode block or a superblock.  Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}

/*
 * On first ENXIO error, initiate an asynchronous forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		/*
		 * Queue an async forced unmount.
		 */
		vfs_ref(ump->um_mountp);
		dounmount(ump->um_mountp,
		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
		printf("UFS: forcibly unmounting %s from %s\n",
		    ump->um_mountp->mnt_stat.f_mntfromname,
		    ump->um_mountp->mnt_stat.f_mntonname);
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one.  They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}
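
/*
 * Mount a filesystem, or update an existing mount.  Plain requests are
 * handed to ffs_mountfs(); MNT_SNAPSHOT requests are diverted to
 * ffs_snapshot(); MNT_UPDATE requests adjust the mount in place,
 * including the read-only to read/write transitions in both directions.
 */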
static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error, flags;
	int error1 __diagused;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);

	/*
	 * If this is a snapshot request, take the snapshot.
	 */
	if (mp->mnt_flag & MNT_SNAPSHOT) {
		if ((mp->mnt_flag & MNT_UPDATE) == 0)
			return (EINVAL);
		return (ffs_snapshot(mp, fspec));
	}

	/*
	 * Must not call namei() while owning busy ref.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		vfs_unbusy(mp);

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE_PNBUF(&ndp);
	if (!vn_isdisk_error(ndp.ni_vp, &error)) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * New mount
	 *
	 * We need the name for the mount point (also used for
	 * "last mounted on") copied in.  If an error occurs,
	 * the mount point is discarded by the upper level code.
	 * Note that vfs_mount_alloc() populates f_mntonname for us.
	 */
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
			vrele(ndp.ni_vp);
			return (error);
		}
	} else {
		/*
		 * When updating, check whether changing from read-only to
		 * read/write; if there is no device name, that's all we do.
		 */
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;

		/*
		 * If it's not the same vnode, or at least the same device,
		 * then it's not correct.
		 */
		if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(ndp.ni_vp);
		if (error)
			return (error);
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				    (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				    (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n",
					    mp->mnt_stat.f_mntonname);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    mp->mnt_stat.f_mntonname,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))) {
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}

	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}
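
/*
 * A note on how ffs_reload() below is typically reached (a sketch, not
 * a verbatim quote of the callers): after repairing the root filesystem
 * in place, fsck(8) re-mounts it with the "update" and "reload" mount
 * options, which the generic mount code translates to MNT_UPDATE and
 * MNT_RELOAD before ffs_mount() above calls ffs_reload().
 */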

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix).  If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *    writers, if requested.
 * 6) invalidate all cached file data.
 * 7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	    newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	ump->um_bsize = fs->fs_bsize;
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", mp->mnt_stat.f_mntonname,
		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 6: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 7: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	int candelete, canspeedup;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	VOP_UNLOCK(odevvp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	VOP_UNLOCK(devvp);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
		error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
	else
		error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
		    ffs_use_bread);
	if (error != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		    (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    mp->mnt_stat.f_mntonname);
		} else {
			vfs_mount_error(mp, "R/W mount on %s denied. "
			    "Filesystem is not clean - run fsck.%s",
			    mp->mnt_stat.f_mntonname,
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
"" : 984 " Forced mount will invalidate journal contents"); 985 error = EPERM; 986 goto out; 987 } 988 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 989 (mp->mnt_flag & MNT_FORCE)) { 990 printf("WARNING: %s: lost blocks %jd files %d\n", 991 mp->mnt_stat.f_mntonname, 992 (intmax_t)fs->fs_pendingblocks, 993 fs->fs_pendinginodes); 994 fs->fs_pendingblocks = 0; 995 fs->fs_pendinginodes = 0; 996 } 997 } 998 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 999 printf("WARNING: %s: mount pending error: blocks %jd " 1000 "files %d\n", mp->mnt_stat.f_mntonname, 1001 (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); 1002 fs->fs_pendingblocks = 0; 1003 fs->fs_pendinginodes = 0; 1004 } 1005 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 1006 #ifdef UFS_GJOURNAL 1007 /* 1008 * Get journal provider name. 1009 */ 1010 len = 1024; 1011 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 1012 if (g_io_getattr("GJOURNAL::provider", cp, &len, 1013 mp->mnt_gjprovider) == 0) { 1014 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 1015 M_UFSMNT, M_WAITOK); 1016 MNT_ILOCK(mp); 1017 mp->mnt_flag |= MNT_GJOURNAL; 1018 MNT_IUNLOCK(mp); 1019 } else { 1020 if ((mp->mnt_flag & MNT_RDONLY) == 0) 1021 printf("WARNING: %s: GJOURNAL flag on fs " 1022 "but no gjournal provider below\n", 1023 mp->mnt_stat.f_mntonname); 1024 free(mp->mnt_gjprovider, M_UFSMNT); 1025 mp->mnt_gjprovider = NULL; 1026 } 1027 #else 1028 printf("WARNING: %s: GJOURNAL flag on fs but no " 1029 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 1030 #endif 1031 } else { 1032 mp->mnt_gjprovider = NULL; 1033 } 1034 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 1035 ump->um_cp = cp; 1036 ump->um_bo = &devvp->v_bufobj; 1037 ump->um_fs = fs; 1038 if (fs->fs_magic == FS_UFS1_MAGIC) { 1039 ump->um_fstype = UFS1; 1040 ump->um_balloc = ffs_balloc_ufs1; 1041 } else { 1042 ump->um_fstype = UFS2; 1043 ump->um_balloc = ffs_balloc_ufs2; 1044 } 1045 ump->um_blkatoff = ffs_blkatoff; 1046 ump->um_truncate = ffs_truncate; 1047 ump->um_update = ffs_update; 1048 ump->um_valloc = ffs_valloc; 1049 ump->um_vfree = ffs_vfree; 1050 ump->um_ifree = ffs_ifree; 1051 ump->um_rdonly = ffs_rdonly; 1052 ump->um_snapgone = ffs_snapgone; 1053 if ((mp->mnt_flag & MNT_UNTRUSTED) != 0) 1054 ump->um_check_blkno = ffs_check_blkno; 1055 else 1056 ump->um_check_blkno = NULL; 1057 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 1058 sx_init(&ump->um_checkpath_lock, "uchpth"); 1059 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); 1060 fs->fs_ronly = ronly; 1061 fs->fs_active = NULL; 1062 mp->mnt_data = ump; 1063 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 1064 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 1065 nmp = NULL; 1066 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 1067 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 1068 if (nmp) 1069 vfs_rel(nmp); 1070 vfs_getnewfsid(mp); 1071 } 1072 ump->um_bsize = fs->fs_bsize; 1073 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 1074 MNT_ILOCK(mp); 1075 mp->mnt_flag |= MNT_LOCAL; 1076 MNT_IUNLOCK(mp); 1077 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 1078 #ifdef MAC 1079 MNT_ILOCK(mp); 1080 mp->mnt_flag |= MNT_MULTILABEL; 1081 MNT_IUNLOCK(mp); 1082 #else 1083 printf("WARNING: %s: multilabel flag on fs but " 1084 "no MAC support\n", mp->mnt_stat.f_mntonname); 1085 #endif 1086 } 1087 if ((fs->fs_flags & FS_ACLS) != 0) { 1088 #ifdef UFS_ACL 1089 MNT_ILOCK(mp); 1090 1091 if (mp->mnt_flag & MNT_NFS4ACLS) 1092 printf("WARNING: %s: ACLs flag on fs conflicts with " 1093 "\"nfsv4acls\" mount option; option 
ignored\n", 1094 mp->mnt_stat.f_mntonname); 1095 mp->mnt_flag &= ~MNT_NFS4ACLS; 1096 mp->mnt_flag |= MNT_ACLS; 1097 1098 MNT_IUNLOCK(mp); 1099 #else 1100 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 1101 mp->mnt_stat.f_mntonname); 1102 #endif 1103 } 1104 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 1105 #ifdef UFS_ACL 1106 MNT_ILOCK(mp); 1107 1108 if (mp->mnt_flag & MNT_ACLS) 1109 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 1110 "with \"acls\" mount option; option ignored\n", 1111 mp->mnt_stat.f_mntonname); 1112 mp->mnt_flag &= ~MNT_ACLS; 1113 mp->mnt_flag |= MNT_NFS4ACLS; 1114 1115 MNT_IUNLOCK(mp); 1116 #else 1117 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 1118 "ACLs support\n", mp->mnt_stat.f_mntonname); 1119 #endif 1120 } 1121 if ((fs->fs_flags & FS_TRIM) != 0) { 1122 len = sizeof(int); 1123 if (g_io_getattr("GEOM::candelete", cp, &len, 1124 &candelete) == 0) { 1125 if (candelete) 1126 ump->um_flags |= UM_CANDELETE; 1127 else 1128 printf("WARNING: %s: TRIM flag on fs but disk " 1129 "does not support TRIM\n", 1130 mp->mnt_stat.f_mntonname); 1131 } else { 1132 printf("WARNING: %s: TRIM flag on fs but disk does " 1133 "not confirm that it supports TRIM\n", 1134 mp->mnt_stat.f_mntonname); 1135 } 1136 if (((ump->um_flags) & UM_CANDELETE) != 0) { 1137 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 1138 taskqueue_thread_enqueue, &ump->um_trim_tq); 1139 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 1140 "%s trim", mp->mnt_stat.f_mntonname); 1141 ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM, 1142 &ump->um_trimlisthashsize); 1143 } 1144 } 1145 1146 len = sizeof(int); 1147 if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) { 1148 if (canspeedup) 1149 ump->um_flags |= UM_CANSPEEDUP; 1150 } 1151 1152 ump->um_mountp = mp; 1153 ump->um_dev = dev; 1154 ump->um_devvp = devvp; 1155 ump->um_odevvp = odevvp; 1156 ump->um_nindir = fs->fs_nindir; 1157 ump->um_bptrtodb = fs->fs_fsbtodb; 1158 ump->um_seqinc = fs->fs_frag; 1159 for (i = 0; i < MAXQUOTAS; i++) 1160 ump->um_quotas[i] = NULLVP; 1161 #ifdef UFS_EXTATTR 1162 ufs_extattr_uepm_init(&ump->um_extattr); 1163 #endif 1164 /* 1165 * Set FS local "last mounted on" information (NULL pad) 1166 */ 1167 bzero(fs->fs_fsmnt, MAXMNTLEN); 1168 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1169 mp->mnt_stat.f_iosize = fs->fs_bsize; 1170 1171 if (mp->mnt_flag & MNT_ROOTFS) { 1172 /* 1173 * Root mount; update timestamp in mount structure. 1174 * this will be used by the common root mount code 1175 * to update the system clock. 1176 */ 1177 mp->mnt_time = fs->fs_time; 1178 } 1179 1180 if (ronly == 0) { 1181 fs->fs_mtime = time_second; 1182 if ((fs->fs_flags & FS_DOSOFTDEP) && 1183 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1184 ffs_flushfiles(mp, FORCECLOSE, td); 1185 goto out; 1186 } 1187 if (fs->fs_snapinum[0] != 0) 1188 ffs_snapshot_mount(mp); 1189 fs->fs_fmod = 1; 1190 fs->fs_clean = 0; 1191 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1192 } 1193 /* 1194 * Initialize filesystem state information in mount struct. 1195 */ 1196 MNT_ILOCK(mp); 1197 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1198 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1199 MNT_IUNLOCK(mp); 1200 #ifdef UFS_EXTATTR 1201 #ifdef UFS_EXTATTR_AUTOSTART 1202 /* 1203 * 1204 * Auto-starting does the following: 1205 * - check for /.attribute in the fs, and extattr_start if so 1206 * - for each file in .attribute, enable that file with 1207 * an attribute of the same name. 
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs->fs_si, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump != NULL) {
		mtx_destroy(UFS_MTX(ump));
		sx_destroy(&ump->um_checkpath_lock);
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		MPASS(ump->um_softdep == NULL);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	mntfs_freevp(devvp);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(struct fs *fs,
	struct ufsmount *ump,
	ufs2_daddr_t sblockloc)
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
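	/*
	 * UFS1 superblocks predate the 64-bit fields below; since the
	 * conversion sets fs_maxbsize to fs_bsize, a mismatch between the
	 * two doubles as the "not yet converted" test.
	 */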
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(struct mount *mp, int mntflags)
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	susp = fs->fs_ronly == 0;
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	MPASS(ump->um_softdep == NULL);
	if (fs->fs_ronly == 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		MPASS(ump->um_trim_inflight == 0);
		taskqueue_free(ump->um_trim_tq);
		free(ump->um_trimhash, M_TRIM);
	}
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	g_topology_lock();
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	BO_LOCK(&ump->um_odevvp->v_bufobj);
	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	mntfs_freevp(ump->um_devvp);
	vrele(ump->um_odevvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	sx_destroy(&ump->um_checkpath_lock);
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	/* devvp is not locked there */
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * If this is a forcible unmount and there were any files that
	 * were unlinked but still open, then vflush() will have
	 * truncated and freed those files, which might have started
	 * some trim work.  Wait here for any trims to complete
	 * and process the blkfrees which follow the trims.
	 * This may create more dirty devvp buffers and softdep deps.
	 */
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
	}

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}

static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

static int
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
{
	struct inode *ip;

	/*
	 * Flags are safe to access because ->v_data invalidation
	 * is held off by listmtx.
	 */
	if (vp->v_type == VNON)
		return (false);
	ip = VTOI(vp);
	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
		return (false);
	return (true);
}
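
/*
 * The filter above runs under the per-mount lazy-list mutex (the
 * "listmtx" its comment refers to), so it is limited to cheap checks;
 * ffs_sync_lazy() below repeats the same test once it holds the vnode
 * interlock.
 */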

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(struct mount *mp)
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	int allerror, error;

	allerror = 0;
	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
		qsync(mp);
#endif
		goto sbupdate;
	}
	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT |
		    LK_INTERLOCK)) != 0)
			continue;
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
sbupdate:
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(struct mount *mp, int waitfor)
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
#ifdef QUOTA
		qsyncvp(vp);
#endif
		for (;;) {
			error = ffs_syncvnode(vp, waitfor, 0);
			if (error == ERELOOKUP)
				continue;
			if (error != 0)
				allerror = error;
			break;
		}
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
		    devvp,
		    softdep_deps,
		    softdep_accdeps,
		    secondary_writes,
		    secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	if (ffs_fsfail_cleanup(ump, allerror))
		allerror = 0;
	return (allerror);
}

int
ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	return (ffs_vgetf(mp, ino, flags, vpp, 0));
}
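
/*
 * Fetch the vnode for the given inode number, allocating and
 * initializing it if necessary.  "flags" carries the usual vn_lock()
 * request; "ffs_flags" holds the FFS-private FFSV_* modifiers
 * (FFSV_REPLACE, FFSV_NEWINODE, FFSV_FORCEINSMQ, ...) used below.
 */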
int
ffs_vgetf(struct mount *mp,
	ino_t ino,
	int flags,
	struct vnode **vpp,
	int ffs_flags)
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	daddr_t dbn;
	int error;

	MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
	    (flags & LK_EXCLUSIVE) != 0);

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0)
		return (error);
	if (*vpp != NULL) {
		if ((ffs_flags & FFSV_REPLACE) == 0 ||
		    ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
		    !VN_IS_DOOMED(*vpp)))
			return (0);
		vgone(*vpp);
		vput(*vpp);
	}

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such rare case as simultaneous creation of vnode
	 * for same ino by different processes.  We just allow them to race
	 * and check later to decide who wins.  Let the race begin!
	 */

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
	if (error) {
		*vpp = NULL;
		uma_zfree_smr(uma_inode, ip);
		return (error);
	}
	/*
	 * FFS supports recursive locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	vp->v_data = ip;
	vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
	ip->i_nextclustercg = -1;
	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
	ip->i_mode = 0;	/* ensure error cases below throw away vnode */
	cluster_init_vn(&ip->i_clusterw);
#ifdef DIAGNOSTIC
	ufs_init_trackers(ip);
#endif
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	if (ffs_flags & FFSV_FORCEINSMQ)
		vp->v_vflag |= VV_FORCEINSMQ;
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree_smr(uma_inode, ip);
		*vpp = NULL;
		return (error);
	}
	vp->v_vflag &= ~VV_FORCEINSMQ;
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0)
		return (error);
	if (*vpp != NULL) {
		/*
		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
		 * operate on empty inode, which must not be found by
		 * other threads until fully filled.  Vnode for empty
		 * inode must be not re-inserted on the hash by other
		 * thread, after removal by us at the beginning.
		 */
		MPASS((ffs_flags & FFSV_REPLACE) == 0);
		return (0);
	}
	if (I_IS_UFS1(ip))
		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
	else
		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);

	if ((ffs_flags & FFSV_NEWINODE) != 0) {
		/* New inode, just zero out its contents. */
		if (I_IS_UFS1(ip))
			memset(ip->i_din1, 0, sizeof(struct ufs1_dinode));
		else
			memset(ip->i_din2, 0, sizeof(struct ufs2_dinode));
	} else {
		/* Read the disk contents for the inode, copy into the inode. */
		dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
		error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
		    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
		if (error != 0) {
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * With mode still zero, it will be unlinked and
			 * returned to the free list by vput().
			 */
			vgone(vp);
			vput(vp);
			*vpp = NULL;
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
			bqrelse(bp);
			vgone(vp);
			vput(vp);
			*vpp = NULL;
			return (error);
		}
		bqrelse(bp);
	}
	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
	    &vp);
	if (error) {
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}
	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
	    &vp);
	if (error) {
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one.  This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		while (ip->i_gen == 0)
			ip->i_gen = arc4random();
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
			DIP_SET(ip, i_gen, ip->i_gen);
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vgone(vp);
			vput(vp);
			*vpp = NULL;
			return (error);
		}
	}
#endif

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
{
	struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
	    vpp, 0));
}

int
ffs_inotovp(struct mount *mp,
	ino_t ino,
	u_int64_t gen,
	int lflags,
	struct vnode **vpp,
	int ffs_flags)
{
	struct ufsmount *ump;
	struct vnode *nvp;
	struct inode *ip;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	u_int cg;
	int error;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	*vpp = NULL;

	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);

	/*
	 * Need to check if inode is initialized because UFS2 does lazy
	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
	 */
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		cg = ino_to_cg(fs, ino);
		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
		if (error != 0)
			return (error);
		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
			brelse(bp);
			return (ESTALE);
		}
		brelse(bp);
	}

	error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
	if (error != 0)
		return (error);

	ip = VTOI(nvp);
	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
		if (ip->i_mode == 0)
			vgone(nvp);
		vput(nvp);
		return (ESTALE);
	}

	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
	*vpp = nvp;
	return (0);
}

/*
 * Initialize the filesystem.
 */
static int
ffs_init(struct vfsconf *vfsp)
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(struct vfsconf *vfsp)
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	taskqueue_drain_all(taskqueue_thread);
	return (ret);
}
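/*
 * Superblock writes go through ffs_sbput() (in ffs_subr.c, which is
 * also compiled into libufs), so it takes an opaque device handle and
 * a write function rather than using the buffer cache directly.  The
 * devfd structure below is the kernel's cookie for that interface.
 */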
/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount	*ump;
	struct buf	*sbbp;
	int		 waitfor;
	int		 suspended;
	int		 error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE))
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}

/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
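	/*
	 * ffs_sbput() invokes this function once for each block of the
	 * cylinder group summary information and then once more, with
	 * loc == fs_sblockloc, for the superblock itself.
	 */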
	/*
	 * Writing the superblock itself. We need to do special checks for it.
	 */
	bp = devfdp->sbbp;
	if (ffs_fsfail_cleanup(ump, devfdp->error))
		devfdp->error = 0;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	UFS_LOCK(ump);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	UFS_UNLOCK(ump);
	fs = (struct fs *)bp->b_data;
	fs->fs_fmod = 0;
	ffs_oldfscompat_write(fs, ump);
	fs->fs_si = NULL;
	/* Recalculate the superblock hash */
	fs->fs_ckhash = ffs_calc_sbhash(fs);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
	int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree_smr(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
		softdep_handle_error(bp);
#endif

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * Mark the cylinder group buffer origbp dirty so that the
	 * failed write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
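	/*
	 * Dependencies that buf_complete() could not finish above are
	 * moved back to the original buffer so that they are retried
	 * the next time it is written.
	 */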
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed.  Clear B_IOSTARTED in case of error.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirtying bp on
	 * errors.  It causes b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated, which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous.  Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);
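	/*
	 * Background writes are used mainly for cylinder group buffers
	 * (marked BX_BKGRDWRITE): the buffer is cloned and the clone is
	 * written asynchronously, leaving the original available for
	 * further modification.  ffs_backgroundwritedone() above
	 * reconciles the clone with the original on completion.
	 */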
	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory.  If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies.  If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original.  The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes.  We have to recalculate
		 * its check hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}
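/*
 * On writes, the strategy routine below handles snapshot
 * copy-on-write, starts any soft updates I/O dependencies, and
 * computes check-hashes for metadata buffers before handing the
 * buffer down to GEOM.
 */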
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	/*
	 * This is the bufobj strategy for the private VCHR vnodes
	 * used by FFS to access the underlying storage device.
	 * We override the default bufobj strategy and thus bypass
	 * VOP_STRATEGY() for these vnodes.
	 */
	vp = bo2vnode(bo);
	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
	    bp->b_vp->v_rdev == NULL ||
	    bp->b_vp->v_rdev->si_mountpt == NULL ||
	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
	    ("ffs_geom_strategy() with wrong vp"));
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bp->b_flags &= ~B_BARRIER;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags &= ~B_BARRIER;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}

#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (u_int)(bp->b_xflags & BX_FSPRIV),
			    PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
		bp->b_xflags |= BX_CVTENXIO;
	g_vfs_strategy(bo, bp);
}

int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif	/* SOFTUPDATES */
#endif	/* DDB */