/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}
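
/*
 * Illustrative sketch (not part of the build): how a caller might use
 * ffs_blkatoff() above.  With a hypothetical fs_bsize of 32768, offset
 * 40000 gives lbn = lblkno(fs, 40000) = 1 and blkoff(fs, 40000) = 7232,
 * so "res" would point at bp->b_data + 7232.
 */
#if 0
static int
example_blkatoff_usage(struct vnode *dvp, off_t offset)
{
	struct buf *bp;
	char *res;
	int error;

	/* Fetch the directory block containing byte "offset". */
	if ((error = ffs_blkatoff(dvp, offset, &res, &bp)) != 0)
		return (error);
	/* ... examine the directory entry at "res" ... */
	brelse(bp);
	return (0);
}
#endif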

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block.  Also check
	 * that it does not point to an inode block or a superblock.  Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}
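
/*
 * Illustrative sketch (not part of the build): ffs_check_blkno() is not
 * called directly; ffs_mountfs() installs it as the um_check_blkno hook
 * only for "untrusted" mounts, so callers can use a pattern like the
 * following and trusted mounts pay no cost:
 */
#if 0
	if (ump->um_check_blkno != NULL &&
	    (error = ump->um_check_blkno(mp, ip->i_number, blkno,
	    fs->fs_bsize)) != 0)
		return (error);		/* EINTEGRITY */
#endif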

/*
 * On first ENXIO error, initiate an asynchronous forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		/*
		 * Queue an async forced unmount.
		 */
		vfs_ref(ump->um_mountp);
		dounmount(ump->um_mountp,
		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
		printf("UFS: forcibly unmounting %s from %s\n",
		    ump->um_mountp->mnt_stat.f_mntfromname,
		    ump->um_mountp->mnt_stat.f_mntonname);
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one.  They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error, error1, flags;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				    (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				    (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    fs->fs_fsmnt,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));

		/*
		 * Must not call namei() while owning busy ref.
		 */
		vfs_unbusy(mp);
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk_error(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount_alloc() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}
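
/*
 * Illustrative note (not part of the build): ffs_cmount() above converts
 * the historic mount(2) argument structure into the same name/value pairs
 * that a modern userland caller passes to nmount(2) directly, roughly:
 */
#if 0
	struct iovec iov[] = {
		{ .iov_base = "fstype", .iov_len = sizeof("fstype") },
		{ .iov_base = "ufs", .iov_len = sizeof("ufs") },
		{ .iov_base = "fspath", .iov_len = sizeof("fspath") },
		{ .iov_base = "/mnt", .iov_len = sizeof("/mnt") },
		{ .iov_base = "from", .iov_len = sizeof("from") },
		{ .iov_base = "/dev/da0s1a", .iov_len = sizeof("/dev/da0s1a") },
	};

	nmount(iov, nitems(iov), MNT_RDONLY);
#endif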

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	    newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(odevvp, mp, td)
	struct vnode *odevvp;
	struct mount *mp;
	struct thread *td;
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	int candelete, canspeedup;
	off_t loc;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	VOP_UNLOCK(odevvp);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	loc = STDSB;
	if ((mp->mnt_flag & MNT_ROOTFS) != 0)
		loc = STDSB_NOHASHFAIL;
	if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		    (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ?
"" : 980 " Forced mount will invalidate journal contents"); 981 error = EPERM; 982 goto out; 983 } 984 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 985 (mp->mnt_flag & MNT_FORCE)) { 986 printf("WARNING: %s: lost blocks %jd files %d\n", 987 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 988 fs->fs_pendinginodes); 989 fs->fs_pendingblocks = 0; 990 fs->fs_pendinginodes = 0; 991 } 992 } 993 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 994 printf("WARNING: %s: mount pending error: blocks %jd " 995 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 996 fs->fs_pendinginodes); 997 fs->fs_pendingblocks = 0; 998 fs->fs_pendinginodes = 0; 999 } 1000 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 1001 #ifdef UFS_GJOURNAL 1002 /* 1003 * Get journal provider name. 1004 */ 1005 len = 1024; 1006 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 1007 if (g_io_getattr("GJOURNAL::provider", cp, &len, 1008 mp->mnt_gjprovider) == 0) { 1009 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 1010 M_UFSMNT, M_WAITOK); 1011 MNT_ILOCK(mp); 1012 mp->mnt_flag |= MNT_GJOURNAL; 1013 MNT_IUNLOCK(mp); 1014 } else { 1015 printf("WARNING: %s: GJOURNAL flag on fs " 1016 "but no gjournal provider below\n", 1017 mp->mnt_stat.f_mntonname); 1018 free(mp->mnt_gjprovider, M_UFSMNT); 1019 mp->mnt_gjprovider = NULL; 1020 } 1021 #else 1022 printf("WARNING: %s: GJOURNAL flag on fs but no " 1023 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 1024 #endif 1025 } else { 1026 mp->mnt_gjprovider = NULL; 1027 } 1028 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 1029 ump->um_cp = cp; 1030 ump->um_bo = &devvp->v_bufobj; 1031 ump->um_fs = fs; 1032 if (fs->fs_magic == FS_UFS1_MAGIC) { 1033 ump->um_fstype = UFS1; 1034 ump->um_balloc = ffs_balloc_ufs1; 1035 } else { 1036 ump->um_fstype = UFS2; 1037 ump->um_balloc = ffs_balloc_ufs2; 1038 } 1039 ump->um_blkatoff = ffs_blkatoff; 1040 ump->um_truncate = ffs_truncate; 1041 ump->um_update = ffs_update; 1042 ump->um_valloc = ffs_valloc; 1043 ump->um_vfree = ffs_vfree; 1044 ump->um_ifree = ffs_ifree; 1045 ump->um_rdonly = ffs_rdonly; 1046 ump->um_snapgone = ffs_snapgone; 1047 if ((mp->mnt_flag & MNT_UNTRUSTED) != 0) 1048 ump->um_check_blkno = ffs_check_blkno; 1049 else 1050 ump->um_check_blkno = NULL; 1051 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 1052 sx_init(&ump->um_checkpath_lock, "uchpth"); 1053 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); 1054 fs->fs_ronly = ronly; 1055 fs->fs_active = NULL; 1056 mp->mnt_data = ump; 1057 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 1058 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 1059 nmp = NULL; 1060 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 1061 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 1062 if (nmp) 1063 vfs_rel(nmp); 1064 vfs_getnewfsid(mp); 1065 } 1066 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 1067 MNT_ILOCK(mp); 1068 mp->mnt_flag |= MNT_LOCAL; 1069 MNT_IUNLOCK(mp); 1070 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 1071 #ifdef MAC 1072 MNT_ILOCK(mp); 1073 mp->mnt_flag |= MNT_MULTILABEL; 1074 MNT_IUNLOCK(mp); 1075 #else 1076 printf("WARNING: %s: multilabel flag on fs but " 1077 "no MAC support\n", mp->mnt_stat.f_mntonname); 1078 #endif 1079 } 1080 if ((fs->fs_flags & FS_ACLS) != 0) { 1081 #ifdef UFS_ACL 1082 MNT_ILOCK(mp); 1083 1084 if (mp->mnt_flag & MNT_NFS4ACLS) 1085 printf("WARNING: %s: ACLs flag on fs conflicts with " 1086 "\"nfsv4acls\" mount option; option ignored\n", 1087 mp->mnt_stat.f_mntonname); 1088 mp->mnt_flag &= ~MNT_NFS4ACLS; 1089 mp->mnt_flag |= MNT_ACLS; 
1090 1091 MNT_IUNLOCK(mp); 1092 #else 1093 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 1094 mp->mnt_stat.f_mntonname); 1095 #endif 1096 } 1097 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 1098 #ifdef UFS_ACL 1099 MNT_ILOCK(mp); 1100 1101 if (mp->mnt_flag & MNT_ACLS) 1102 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 1103 "with \"acls\" mount option; option ignored\n", 1104 mp->mnt_stat.f_mntonname); 1105 mp->mnt_flag &= ~MNT_ACLS; 1106 mp->mnt_flag |= MNT_NFS4ACLS; 1107 1108 MNT_IUNLOCK(mp); 1109 #else 1110 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 1111 "ACLs support\n", mp->mnt_stat.f_mntonname); 1112 #endif 1113 } 1114 if ((fs->fs_flags & FS_TRIM) != 0) { 1115 len = sizeof(int); 1116 if (g_io_getattr("GEOM::candelete", cp, &len, 1117 &candelete) == 0) { 1118 if (candelete) 1119 ump->um_flags |= UM_CANDELETE; 1120 else 1121 printf("WARNING: %s: TRIM flag on fs but disk " 1122 "does not support TRIM\n", 1123 mp->mnt_stat.f_mntonname); 1124 } else { 1125 printf("WARNING: %s: TRIM flag on fs but disk does " 1126 "not confirm that it supports TRIM\n", 1127 mp->mnt_stat.f_mntonname); 1128 } 1129 if (((ump->um_flags) & UM_CANDELETE) != 0) { 1130 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 1131 taskqueue_thread_enqueue, &ump->um_trim_tq); 1132 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 1133 "%s trim", mp->mnt_stat.f_mntonname); 1134 ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM, 1135 &ump->um_trimlisthashsize); 1136 } 1137 } 1138 1139 len = sizeof(int); 1140 if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) { 1141 if (canspeedup) 1142 ump->um_flags |= UM_CANSPEEDUP; 1143 } 1144 1145 ump->um_mountp = mp; 1146 ump->um_dev = dev; 1147 ump->um_devvp = devvp; 1148 ump->um_odevvp = odevvp; 1149 ump->um_nindir = fs->fs_nindir; 1150 ump->um_bptrtodb = fs->fs_fsbtodb; 1151 ump->um_seqinc = fs->fs_frag; 1152 for (i = 0; i < MAXQUOTAS; i++) 1153 ump->um_quotas[i] = NULLVP; 1154 #ifdef UFS_EXTATTR 1155 ufs_extattr_uepm_init(&ump->um_extattr); 1156 #endif 1157 /* 1158 * Set FS local "last mounted on" information (NULL pad) 1159 */ 1160 bzero(fs->fs_fsmnt, MAXMNTLEN); 1161 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1162 mp->mnt_stat.f_iosize = fs->fs_bsize; 1163 1164 if (mp->mnt_flag & MNT_ROOTFS) { 1165 /* 1166 * Root mount; update timestamp in mount structure. 1167 * this will be used by the common root mount code 1168 * to update the system clock. 1169 */ 1170 mp->mnt_time = fs->fs_time; 1171 } 1172 1173 if (ronly == 0) { 1174 fs->fs_mtime = time_second; 1175 if ((fs->fs_flags & FS_DOSOFTDEP) && 1176 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1177 ffs_flushfiles(mp, FORCECLOSE, td); 1178 goto out; 1179 } 1180 if (fs->fs_snapinum[0] != 0) 1181 ffs_snapshot_mount(mp); 1182 fs->fs_fmod = 1; 1183 fs->fs_clean = 0; 1184 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1185 } 1186 /* 1187 * Initialize filesystem state information in mount struct. 1188 */ 1189 MNT_ILOCK(mp); 1190 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1191 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1192 MNT_IUNLOCK(mp); 1193 #ifdef UFS_EXTATTR 1194 #ifdef UFS_EXTATTR_AUTOSTART 1195 /* 1196 * 1197 * Auto-starting does the following: 1198 * - check for /.attribute in the fs, and extattr_start if so 1199 * - for each file in .attribute, enable that file with 1200 * an attribute of the same name. 1201 * Not clear how to report errors -- probably eat them. 
1202 * This would all happen while the filesystem was busy/not 1203 * available, so would effectively be "atomic". 1204 */ 1205 (void) ufs_extattr_autostart(mp, td); 1206 #endif /* !UFS_EXTATTR_AUTOSTART */ 1207 #endif /* !UFS_EXTATTR */ 1208 return (0); 1209 out: 1210 if (fs != NULL) { 1211 free(fs->fs_csp, M_UFSMNT); 1212 free(fs->fs_si, M_UFSMNT); 1213 free(fs, M_UFSMNT); 1214 } 1215 if (cp != NULL) { 1216 g_topology_lock(); 1217 g_vfs_close(cp); 1218 g_topology_unlock(); 1219 } 1220 if (ump != NULL) { 1221 mtx_destroy(UFS_MTX(ump)); 1222 sx_destroy(&ump->um_checkpath_lock); 1223 if (mp->mnt_gjprovider != NULL) { 1224 free(mp->mnt_gjprovider, M_UFSMNT); 1225 mp->mnt_gjprovider = NULL; 1226 } 1227 MPASS(ump->um_softdep == NULL); 1228 free(ump, M_UFSMNT); 1229 mp->mnt_data = NULL; 1230 } 1231 BO_LOCK(&odevvp->v_bufobj); 1232 odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1233 BO_UNLOCK(&odevvp->v_bufobj); 1234 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 1235 mntfs_freevp(devvp); 1236 dev_rel(dev); 1237 return (error); 1238 } 1239 1240 /* 1241 * A read function for use by filesystem-layer routines. 1242 */ 1243 static int 1244 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size) 1245 { 1246 struct buf *bp; 1247 int error; 1248 1249 KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp)); 1250 *bufp = malloc(size, M_UFSMNT, M_WAITOK); 1251 if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED, 1252 &bp)) != 0) 1253 return (error); 1254 bcopy(bp->b_data, *bufp, size); 1255 bp->b_flags |= B_INVAL | B_NOCACHE; 1256 brelse(bp); 1257 return (0); 1258 } 1259 1260 static int bigcgs = 0; 1261 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); 1262 1263 /* 1264 * Sanity checks for loading old filesystem superblocks. 1265 * See ffs_oldfscompat_write below for unwound actions. 1266 * 1267 * XXX - Parts get retired eventually. 1268 * Unfortunately new bits get added. 1269 */ 1270 static void 1271 ffs_oldfscompat_read(fs, ump, sblockloc) 1272 struct fs *fs; 1273 struct ufsmount *ump; 1274 ufs2_daddr_t sblockloc; 1275 { 1276 off_t maxfilesize; 1277 1278 /* 1279 * If not yet done, update fs_flags location and value of fs_sblockloc. 1280 */ 1281 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1282 fs->fs_flags = fs->fs_old_flags; 1283 fs->fs_old_flags |= FS_FLAGS_UPDATED; 1284 fs->fs_sblockloc = sblockloc; 1285 } 1286 /* 1287 * If not yet done, update UFS1 superblock with new wider fields. 
1288 */ 1289 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { 1290 fs->fs_maxbsize = fs->fs_bsize; 1291 fs->fs_time = fs->fs_old_time; 1292 fs->fs_size = fs->fs_old_size; 1293 fs->fs_dsize = fs->fs_old_dsize; 1294 fs->fs_csaddr = fs->fs_old_csaddr; 1295 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; 1296 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; 1297 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; 1298 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; 1299 } 1300 if (fs->fs_magic == FS_UFS1_MAGIC && 1301 fs->fs_old_inodefmt < FS_44INODEFMT) { 1302 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; 1303 fs->fs_qbmask = ~fs->fs_bmask; 1304 fs->fs_qfmask = ~fs->fs_fmask; 1305 } 1306 if (fs->fs_magic == FS_UFS1_MAGIC) { 1307 ump->um_savedmaxfilesize = fs->fs_maxfilesize; 1308 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; 1309 if (fs->fs_maxfilesize > maxfilesize) 1310 fs->fs_maxfilesize = maxfilesize; 1311 } 1312 /* Compatibility for old filesystems */ 1313 if (fs->fs_avgfilesize <= 0) 1314 fs->fs_avgfilesize = AVFILESIZ; 1315 if (fs->fs_avgfpdir <= 0) 1316 fs->fs_avgfpdir = AFPDIR; 1317 if (bigcgs) { 1318 fs->fs_save_cgsize = fs->fs_cgsize; 1319 fs->fs_cgsize = fs->fs_bsize; 1320 } 1321 } 1322 1323 /* 1324 * Unwinding superblock updates for old filesystems. 1325 * See ffs_oldfscompat_read above for details. 1326 * 1327 * XXX - Parts get retired eventually. 1328 * Unfortunately new bits get added. 1329 */ 1330 void 1331 ffs_oldfscompat_write(fs, ump) 1332 struct fs *fs; 1333 struct ufsmount *ump; 1334 { 1335 1336 /* 1337 * Copy back UFS2 updated fields that UFS1 inspects. 1338 */ 1339 if (fs->fs_magic == FS_UFS1_MAGIC) { 1340 fs->fs_old_time = fs->fs_time; 1341 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; 1342 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; 1343 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; 1344 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; 1345 fs->fs_maxfilesize = ump->um_savedmaxfilesize; 1346 } 1347 if (bigcgs) { 1348 fs->fs_cgsize = fs->fs_save_cgsize; 1349 fs->fs_save_cgsize = 0; 1350 } 1351 } 1352 1353 /* 1354 * unmount system call 1355 */ 1356 static int 1357 ffs_unmount(mp, mntflags) 1358 struct mount *mp; 1359 int mntflags; 1360 { 1361 struct thread *td; 1362 struct ufsmount *ump = VFSTOUFS(mp); 1363 struct fs *fs; 1364 int error, flags, susp; 1365 #ifdef UFS_EXTATTR 1366 int e_restart; 1367 #endif 1368 1369 flags = 0; 1370 td = curthread; 1371 fs = ump->um_fs; 1372 if (mntflags & MNT_FORCE) 1373 flags |= FORCECLOSE; 1374 susp = fs->fs_ronly == 0; 1375 #ifdef UFS_EXTATTR 1376 if ((error = ufs_extattr_stop(mp, td))) { 1377 if (error != EOPNOTSUPP) 1378 printf("WARNING: unmount %s: ufs_extattr_stop " 1379 "returned errno %d\n", mp->mnt_stat.f_mntonname, 1380 error); 1381 e_restart = 0; 1382 } else { 1383 ufs_extattr_uepm_destroy(&ump->um_extattr); 1384 e_restart = 1; 1385 } 1386 #endif 1387 if (susp) { 1388 error = vfs_write_suspend_umnt(mp); 1389 if (error != 0) 1390 goto fail1; 1391 } 1392 if (MOUNTEDSOFTDEP(mp)) 1393 error = softdep_flushfiles(mp, flags, td); 1394 else 1395 error = ffs_flushfiles(mp, flags, td); 1396 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) 1397 goto fail; 1398 1399 UFS_LOCK(ump); 1400 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1401 printf("WARNING: unmount %s: pending error: blocks %jd " 1402 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1403 fs->fs_pendinginodes); 1404 fs->fs_pendingblocks = 0; 1405 
fs->fs_pendinginodes = 0; 1406 } 1407 UFS_UNLOCK(ump); 1408 if (MOUNTEDSOFTDEP(mp)) 1409 softdep_unmount(mp); 1410 MPASS(ump->um_softdep == NULL); 1411 if (fs->fs_ronly == 0) { 1412 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; 1413 error = ffs_sbupdate(ump, MNT_WAIT, 0); 1414 if (ffs_fsfail_cleanup(ump, error)) 1415 error = 0; 1416 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) { 1417 fs->fs_clean = 0; 1418 goto fail; 1419 } 1420 } 1421 if (susp) 1422 vfs_write_resume(mp, VR_START_WRITE); 1423 if (ump->um_trim_tq != NULL) { 1424 while (ump->um_trim_inflight != 0) 1425 pause("ufsutr", hz); 1426 taskqueue_drain_all(ump->um_trim_tq); 1427 taskqueue_free(ump->um_trim_tq); 1428 free (ump->um_trimhash, M_TRIM); 1429 } 1430 g_topology_lock(); 1431 g_vfs_close(ump->um_cp); 1432 g_topology_unlock(); 1433 BO_LOCK(&ump->um_odevvp->v_bufobj); 1434 ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1435 BO_UNLOCK(&ump->um_odevvp->v_bufobj); 1436 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1437 mntfs_freevp(ump->um_devvp); 1438 vrele(ump->um_odevvp); 1439 dev_rel(ump->um_dev); 1440 mtx_destroy(UFS_MTX(ump)); 1441 sx_destroy(&ump->um_checkpath_lock); 1442 if (mp->mnt_gjprovider != NULL) { 1443 free(mp->mnt_gjprovider, M_UFSMNT); 1444 mp->mnt_gjprovider = NULL; 1445 } 1446 free(fs->fs_csp, M_UFSMNT); 1447 free(fs->fs_si, M_UFSMNT); 1448 free(fs, M_UFSMNT); 1449 free(ump, M_UFSMNT); 1450 mp->mnt_data = NULL; 1451 MNT_ILOCK(mp); 1452 mp->mnt_flag &= ~MNT_LOCAL; 1453 MNT_IUNLOCK(mp); 1454 if (td->td_su == mp) { 1455 td->td_su = NULL; 1456 vfs_rel(mp); 1457 } 1458 return (error); 1459 1460 fail: 1461 if (susp) 1462 vfs_write_resume(mp, VR_START_WRITE); 1463 fail1: 1464 #ifdef UFS_EXTATTR 1465 if (e_restart) { 1466 ufs_extattr_uepm_init(&ump->um_extattr); 1467 #ifdef UFS_EXTATTR_AUTOSTART 1468 (void) ufs_extattr_autostart(mp, td); 1469 #endif 1470 } 1471 #endif 1472 1473 return (error); 1474 } 1475 1476 /* 1477 * Flush out all the files in a filesystem. 1478 */ 1479 int 1480 ffs_flushfiles(mp, flags, td) 1481 struct mount *mp; 1482 int flags; 1483 struct thread *td; 1484 { 1485 struct ufsmount *ump; 1486 int qerror, error; 1487 1488 ump = VFSTOUFS(mp); 1489 qerror = 0; 1490 #ifdef QUOTA 1491 if (mp->mnt_flag & MNT_QUOTA) { 1492 int i; 1493 error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1494 if (error) 1495 return (error); 1496 for (i = 0; i < MAXQUOTAS; i++) { 1497 error = quotaoff(td, mp, i); 1498 if (error != 0) { 1499 if ((flags & EARLYFLUSH) == 0) 1500 return (error); 1501 else 1502 qerror = error; 1503 } 1504 } 1505 1506 /* 1507 * Here we fall through to vflush again to ensure that 1508 * we have gotten rid of all the system vnodes, unless 1509 * quotas must not be closed. 1510 */ 1511 } 1512 #endif 1513 ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles"); 1514 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1515 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1516 return (error); 1517 ffs_snapshot_unmount(mp); 1518 flags |= FORCECLOSE; 1519 /* 1520 * Here we fall through to vflush again to ensure 1521 * that we have gotten rid of all the system vnodes. 1522 */ 1523 } 1524 1525 /* 1526 * Do not close system files if quotas were not closed, to be 1527 * able to sync the remaining dquots. The freeblks softupdate 1528 * workitems might hold a reference on a dquot, preventing 1529 * quotaoff() from completing. 
Next round of 1530 * softdep_flushworklist() iteration should process the 1531 * blockers, allowing the next run of quotaoff() to finally 1532 * flush held dquots. 1533 * 1534 * Otherwise, flush all the files. 1535 */ 1536 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1537 return (error); 1538 1539 /* 1540 * Flush filesystem metadata. 1541 */ 1542 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1543 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1544 VOP_UNLOCK(ump->um_devvp); 1545 return (error); 1546 } 1547 1548 /* 1549 * Get filesystem statistics. 1550 */ 1551 static int 1552 ffs_statfs(mp, sbp) 1553 struct mount *mp; 1554 struct statfs *sbp; 1555 { 1556 struct ufsmount *ump; 1557 struct fs *fs; 1558 1559 ump = VFSTOUFS(mp); 1560 fs = ump->um_fs; 1561 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1562 panic("ffs_statfs"); 1563 sbp->f_version = STATFS_VERSION; 1564 sbp->f_bsize = fs->fs_fsize; 1565 sbp->f_iosize = fs->fs_bsize; 1566 sbp->f_blocks = fs->fs_dsize; 1567 UFS_LOCK(ump); 1568 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 1569 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 1570 sbp->f_bavail = freespace(fs, fs->fs_minfree) + 1571 dbtofsb(fs, fs->fs_pendingblocks); 1572 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; 1573 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 1574 UFS_UNLOCK(ump); 1575 sbp->f_namemax = UFS_MAXNAMLEN; 1576 return (0); 1577 } 1578 1579 static bool 1580 sync_doupdate(struct inode *ip) 1581 { 1582 1583 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 1584 IN_UPDATE)) != 0); 1585 } 1586 1587 static int 1588 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused) 1589 { 1590 struct inode *ip; 1591 1592 /* 1593 * Flags are safe to access because ->v_data invalidation 1594 * is held off by listmtx. 1595 */ 1596 if (vp->v_type == VNON) 1597 return (false); 1598 ip = VTOI(vp); 1599 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) 1600 return (false); 1601 return (true); 1602 } 1603 1604 /* 1605 * For a lazy sync, we only care about access times, quotas and the 1606 * superblock. Other filesystem changes are already converted to 1607 * cylinder group blocks or inode blocks updates and are written to 1608 * disk by syncer. 1609 */ 1610 static int 1611 ffs_sync_lazy(mp) 1612 struct mount *mp; 1613 { 1614 struct vnode *mvp, *vp; 1615 struct inode *ip; 1616 struct thread *td; 1617 int allerror, error; 1618 1619 allerror = 0; 1620 td = curthread; 1621 if ((mp->mnt_flag & MNT_NOATIME) != 0) { 1622 #ifdef QUOTA 1623 qsync(mp); 1624 #endif 1625 goto sbupdate; 1626 } 1627 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) { 1628 if (vp->v_type == VNON) { 1629 VI_UNLOCK(vp); 1630 continue; 1631 } 1632 ip = VTOI(vp); 1633 1634 /* 1635 * The IN_ACCESS flag is converted to IN_MODIFIED by 1636 * ufs_close() and ufs_getattr() by the calls to 1637 * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1638 * Test also all the other timestamp flags too, to pick up 1639 * any other cases that could be missed. 
1640 */ 1641 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1642 VI_UNLOCK(vp); 1643 continue; 1644 } 1645 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0) 1646 continue; 1647 #ifdef QUOTA 1648 qsyncvp(vp); 1649 #endif 1650 if (sync_doupdate(ip)) 1651 error = ffs_update(vp, 0); 1652 if (error != 0) 1653 allerror = error; 1654 vput(vp); 1655 } 1656 sbupdate: 1657 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1658 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1659 allerror = error; 1660 return (allerror); 1661 } 1662 1663 /* 1664 * Go through the disk queues to initiate sandbagged IO; 1665 * go through the inodes to write those that have been modified; 1666 * initiate the writing of the super block if it has been modified. 1667 * 1668 * Note: we are always called with the filesystem marked busy using 1669 * vfs_busy(). 1670 */ 1671 static int 1672 ffs_sync(mp, waitfor) 1673 struct mount *mp; 1674 int waitfor; 1675 { 1676 struct vnode *mvp, *vp, *devvp; 1677 struct thread *td; 1678 struct inode *ip; 1679 struct ufsmount *ump = VFSTOUFS(mp); 1680 struct fs *fs; 1681 int error, count, lockreq, allerror = 0; 1682 int suspend; 1683 int suspended; 1684 int secondary_writes; 1685 int secondary_accwrites; 1686 int softdep_deps; 1687 int softdep_accdeps; 1688 struct bufobj *bo; 1689 1690 suspend = 0; 1691 suspended = 0; 1692 td = curthread; 1693 fs = ump->um_fs; 1694 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) 1695 panic("%s: ffs_sync: modification on read-only filesystem", 1696 fs->fs_fsmnt); 1697 if (waitfor == MNT_LAZY) { 1698 if (!rebooting) 1699 return (ffs_sync_lazy(mp)); 1700 waitfor = MNT_NOWAIT; 1701 } 1702 1703 /* 1704 * Write back each (modified) inode. 1705 */ 1706 lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1707 if (waitfor == MNT_SUSPEND) { 1708 suspend = 1; 1709 waitfor = MNT_WAIT; 1710 } 1711 if (waitfor == MNT_WAIT) 1712 lockreq = LK_EXCLUSIVE; 1713 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; 1714 loop: 1715 /* Grab snapshot of secondary write counts */ 1716 MNT_ILOCK(mp); 1717 secondary_writes = mp->mnt_secondary_writes; 1718 secondary_accwrites = mp->mnt_secondary_accwrites; 1719 MNT_IUNLOCK(mp); 1720 1721 /* Grab snapshot of softdep dependency counts */ 1722 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1723 1724 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1725 /* 1726 * Depend on the vnode interlock to keep things stable enough 1727 * for a quick test. Since there might be hundreds of 1728 * thousands of vnodes, we cannot afford even a subroutine 1729 * call unless there's a good chance that we have work to do. 1730 */ 1731 if (vp->v_type == VNON) { 1732 VI_UNLOCK(vp); 1733 continue; 1734 } 1735 ip = VTOI(vp); 1736 if ((ip->i_flag & 1737 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 1738 vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1739 VI_UNLOCK(vp); 1740 continue; 1741 } 1742 if ((error = vget(vp, lockreq)) != 0) { 1743 if (error == ENOENT || error == ENOLCK) { 1744 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1745 goto loop; 1746 } 1747 continue; 1748 } 1749 #ifdef QUOTA 1750 qsyncvp(vp); 1751 #endif 1752 for (;;) { 1753 error = ffs_syncvnode(vp, waitfor, 0); 1754 if (error == ERELOOKUP) 1755 continue; 1756 if (error != 0) 1757 allerror = error; 1758 break; 1759 } 1760 vput(vp); 1761 } 1762 /* 1763 * Force stale filesystem control information to be flushed. 
1764 */ 1765 if (waitfor == MNT_WAIT || rebooting) { 1766 if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 1767 allerror = error; 1768 if (ffs_fsfail_cleanup(ump, allerror)) 1769 allerror = 0; 1770 /* Flushed work items may create new vnodes to clean */ 1771 if (allerror == 0 && count) 1772 goto loop; 1773 } 1774 1775 devvp = ump->um_devvp; 1776 bo = &devvp->v_bufobj; 1777 BO_LOCK(bo); 1778 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1779 BO_UNLOCK(bo); 1780 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1781 error = VOP_FSYNC(devvp, waitfor, td); 1782 VOP_UNLOCK(devvp); 1783 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 1784 error = ffs_sbupdate(ump, waitfor, 0); 1785 if (error != 0) 1786 allerror = error; 1787 if (ffs_fsfail_cleanup(ump, allerror)) 1788 allerror = 0; 1789 if (allerror == 0 && waitfor == MNT_WAIT) 1790 goto loop; 1791 } else if (suspend != 0) { 1792 if (softdep_check_suspend(mp, 1793 devvp, 1794 softdep_deps, 1795 softdep_accdeps, 1796 secondary_writes, 1797 secondary_accwrites) != 0) { 1798 MNT_IUNLOCK(mp); 1799 goto loop; /* More work needed */ 1800 } 1801 mtx_assert(MNT_MTX(mp), MA_OWNED); 1802 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1803 MNT_IUNLOCK(mp); 1804 suspended = 1; 1805 } else 1806 BO_UNLOCK(bo); 1807 /* 1808 * Write back modified superblock. 1809 */ 1810 if (fs->fs_fmod != 0 && 1811 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1812 allerror = error; 1813 if (ffs_fsfail_cleanup(ump, allerror)) 1814 allerror = 0; 1815 return (allerror); 1816 } 1817 1818 int 1819 ffs_vget(mp, ino, flags, vpp) 1820 struct mount *mp; 1821 ino_t ino; 1822 int flags; 1823 struct vnode **vpp; 1824 { 1825 return (ffs_vgetf(mp, ino, flags, vpp, 0)); 1826 } 1827 1828 int 1829 ffs_vgetf(mp, ino, flags, vpp, ffs_flags) 1830 struct mount *mp; 1831 ino_t ino; 1832 int flags; 1833 struct vnode **vpp; 1834 int ffs_flags; 1835 { 1836 struct fs *fs; 1837 struct inode *ip; 1838 struct ufsmount *ump; 1839 struct buf *bp; 1840 struct vnode *vp; 1841 daddr_t dbn; 1842 int error; 1843 1844 MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 || 1845 (flags & LK_EXCLUSIVE) != 0); 1846 1847 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 1848 if (error != 0) 1849 return (error); 1850 if (*vpp != NULL) { 1851 if ((ffs_flags & FFSV_REPLACE) == 0 || 1852 ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 || 1853 !VN_IS_DOOMED(*vpp))) 1854 return (0); 1855 vgone(*vpp); 1856 vput(*vpp); 1857 } 1858 1859 /* 1860 * We must promote to an exclusive lock for vnode creation. This 1861 * can happen if lookup is passed LOCKSHARED. 1862 */ 1863 if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1864 flags &= ~LK_TYPE_MASK; 1865 flags |= LK_EXCLUSIVE; 1866 } 1867 1868 /* 1869 * We do not lock vnode creation as it is believed to be too 1870 * expensive for such rare case as simultaneous creation of vnode 1871 * for same ino by different processes. We just allow them to race 1872 * and check later to decide who wins. Let the race begin! 1873 */ 1874 1875 ump = VFSTOUFS(mp); 1876 fs = ump->um_fs; 1877 ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO); 1878 1879 /* Allocate a new vnode/inode. */ 1880 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 1881 &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1882 if (error) { 1883 *vpp = NULL; 1884 uma_zfree_smr(uma_inode, ip); 1885 return (error); 1886 } 1887 /* 1888 * FFS supports recursive locking. 
1889 */ 1890 lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); 1891 VN_LOCK_AREC(vp); 1892 vp->v_data = ip; 1893 vp->v_bufobj.bo_bsize = fs->fs_bsize; 1894 ip->i_vnode = vp; 1895 ip->i_ump = ump; 1896 ip->i_number = ino; 1897 ip->i_ea_refs = 0; 1898 ip->i_nextclustercg = -1; 1899 ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; 1900 ip->i_mode = 0; /* ensure error cases below throw away vnode */ 1901 cluster_init_vn(&ip->i_clusterw); 1902 #ifdef DIAGNOSTIC 1903 ufs_init_trackers(ip); 1904 #endif 1905 #ifdef QUOTA 1906 { 1907 int i; 1908 for (i = 0; i < MAXQUOTAS; i++) 1909 ip->i_dquot[i] = NODQUOT; 1910 } 1911 #endif 1912 1913 if (ffs_flags & FFSV_FORCEINSMQ) 1914 vp->v_vflag |= VV_FORCEINSMQ; 1915 error = insmntque(vp, mp); 1916 if (error != 0) { 1917 uma_zfree_smr(uma_inode, ip); 1918 *vpp = NULL; 1919 return (error); 1920 } 1921 vp->v_vflag &= ~VV_FORCEINSMQ; 1922 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 1923 if (error != 0) 1924 return (error); 1925 if (*vpp != NULL) { 1926 /* 1927 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set) 1928 * operate on empty inode, which must not be found by 1929 * other threads until fully filled. Vnode for empty 1930 * inode must be not re-inserted on the hash by other 1931 * thread, after removal by us at the beginning. 1932 */ 1933 MPASS((ffs_flags & FFSV_REPLACE) == 0); 1934 return (0); 1935 } 1936 1937 /* Read in the disk contents for the inode, copy into the inode. */ 1938 dbn = fsbtodb(fs, ino_to_fsba(fs, ino)); 1939 error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize, 1940 NULL, NULL, 0, NOCRED, 0, NULL, &bp); 1941 if (error != 0) { 1942 /* 1943 * The inode does not contain anything useful, so it would 1944 * be misleading to leave it on its hash chain. With mode 1945 * still zero, it will be unlinked and returned to the free 1946 * list by vput(). 1947 */ 1948 vgone(vp); 1949 vput(vp); 1950 *vpp = NULL; 1951 return (error); 1952 } 1953 if (I_IS_UFS1(ip)) 1954 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 1955 else 1956 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1957 if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) { 1958 bqrelse(bp); 1959 vgone(vp); 1960 vput(vp); 1961 *vpp = NULL; 1962 return (error); 1963 } 1964 if (DOINGSOFTDEP(vp) && (!fs->fs_ronly || 1965 (ffs_flags & FFSV_FORCEINODEDEP) != 0)) 1966 softdep_load_inodeblock(ip); 1967 else 1968 ip->i_effnlink = ip->i_nlink; 1969 bqrelse(bp); 1970 1971 /* 1972 * Initialize the vnode from the inode, check for aliases. 1973 * Note that the underlying vnode may have changed. 1974 */ 1975 error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, 1976 &vp); 1977 if (error) { 1978 vgone(vp); 1979 vput(vp); 1980 *vpp = NULL; 1981 return (error); 1982 } 1983 1984 /* 1985 * Finish inode initialization. 1986 */ 1987 if (vp->v_type != VFIFO) { 1988 /* FFS supports shared locking for all files except fifos. */ 1989 VN_LOCK_ASHARE(vp); 1990 } 1991 1992 /* 1993 * Set up a generation number for this inode if it does not 1994 * already have one. This should only happen on old filesystems. 
/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
{
	struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
	    vpp, 0));
}

int
ffs_inotovp(struct mount *mp, ino_t ino, u_int64_t gen, int lflags,
    struct vnode **vpp, int ffs_flags)
{
	struct ufsmount *ump;
	struct vnode *nvp;
	struct inode *ip;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	u_int cg;
	int error;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	*vpp = NULL;

	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);

	/*
	 * Need to check whether the inode is initialized because UFS2
	 * does lazy initialization and nfs_fhtovp can offer arbitrary
	 * inode numbers.
	 */
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		cg = ino_to_cg(fs, ino);
		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
		if (error != 0)
			return (error);
		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
			brelse(bp);
			return (ESTALE);
		}
		brelse(bp);
	}

	error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
	if (error != 0)
		return (error);

	ip = VTOI(nvp);
	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
		if (ip->i_mode == 0)
			vgone(nvp);
		vput(nvp);
		return (ESTALE);
	}

	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
	*vpp = nvp;
	return (0);
}
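/*
 * For reference (a sketch of the handle format as used above): an FFS
 * file handle is essentially the pair {inode number, generation}. A
 * handle therefore goes stale when the inode is freed and later
 * reallocated, at which point the new i_gen no longer matches the gen
 * stored in the handle and ESTALE is returned to the client.
 */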
/*
 * Initialize the filesystem.
 */
static int
ffs_init(struct vfsconf *vfsp)
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(struct vfsconf *vfsp)
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	taskqueue_drain_all(taskqueue_thread);
	return (ret);
}

/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount	*ump;
	struct buf	*sbbp;
	int		 waitfor;
	int		 suspended;
	int		 error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE))
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}
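/*
 * Design note (a hedged summary, assuming the usual sharing of the
 * superblock I/O routines with userland tools): because ffs_sbput()
 * cannot assume a kernel buffer cache, it takes an opaque "devfd"
 * pointer and an I/O callback rather than doing the I/O itself. In
 * this file the write-side callback is ffs_use_bwrite() below, with
 * ffs_use_bread() as its read-side counterpart, and struct devfd is
 * the closure that carries the kernel-side state into the callback.
 */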
/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself. We need to do special checks for it.
	 */
	bp = devfdp->sbbp;
	if (ffs_fsfail_cleanup(ump, devfdp->error))
		devfdp->error = 0;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	fs = (struct fs *)bp->b_data;
	ffs_oldfscompat_write(fs, ump);
	fs->fs_si = NULL;
	/* Recalculate the superblock hash */
	fs->fs_ckhash = ffs_calc_sbhash(fs);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree_smr(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
		softdep_handle_error(bp);
#endif

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * If the write failed, mark the cylinder group buffer origbp
	 * dirty so that the failed write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed. Clear B_IOSTARTED in case of error.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirty bp on
	 * errors. That would cause a b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}
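/*
 * The handshake between the routine above and ffs_bufwrite() below
 * (restating the code, not adding behavior): ffs_bufwrite() sets
 * BV_BKGRDINPROG on the original buffer before launching the background
 * copy; a synchronous writer of the original sets BV_BKGRDWAIT and
 * sleeps on b_xflags (an asynchronous writer just delays itself with
 * bdwrite()); ffs_backgroundwritedone() clears BV_BKGRDINPROG and
 * issues the matching wakeup().
 */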
/*
 * Write, release buffer on completion. (Done by iodone
 * if async). Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable. This is true even of NFS
 * now, so we set it generally. This could be set either here
 * or in biodone() since the I/O is synchronous. We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory. If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies. If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original. The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes. We have to recalculate
		 * its check hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}
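/*
 * The check-hash idiom used above, and again in ffs_geom_strategy()
 * below (the same two lines as in the code, repeated here only for
 * emphasis): the stored hash field must be zeroed before hashing so
 * that a verifier can recompute the CRC over the identical bytes:
 *
 *	cgp->cg_ckhash = 0;
 *	cgp->cg_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
 */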
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	/*
	 * This is the bufobj strategy for the private VCHR vnodes
	 * used by FFS to access the underlying storage device.
	 * We override the default bufobj strategy and thus bypass
	 * VOP_STRATEGY() for these vnodes.
	 */
	vp = bo2vnode(bo);
	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
	    bp->b_vp->v_rdev == NULL ||
	    bp->b_vp->v_rdev->si_mountpt == NULL ||
	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
	    ("ffs_geom_strategy() with wrong vp"));
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bp->b_flags &= ~B_BARRIER;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags &= ~B_BARRIER;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}
#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (u_int)(bp->b_xflags & BX_FSPRIV),
			    PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
		bp->b_xflags |= BX_CVTENXIO;
	g_vfs_strategy(bo, bp);
}
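/*
 * A note on the final steps above (a hedged summary; the consumer of
 * BX_CVTENXIO lives in the buffer-cache/GEOM layers, not in this file):
 * when vfs.ffs.enxio_enable is set, BX_CVTENXIO marks non-read I/O so
 * that fatal device errors are reported as ENXIO, the error that the
 * ffs_fsfail_cleanup() calls throughout this file use to recognize a
 * failed device.
 */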
int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif	/* SOFTUPDATES */
#endif	/* DDB */