1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1991, 1993, 1994 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_quota.h" 38 #include "opt_ufs.h" 39 #include "opt_ffs.h" 40 #include "opt_ddb.h" 41 42 #include <sys/param.h> 43 #include <sys/gsb_crc32.h> 44 #include <sys/systm.h> 45 #include <sys/namei.h> 46 #include <sys/priv.h> 47 #include <sys/proc.h> 48 #include <sys/taskqueue.h> 49 #include <sys/kernel.h> 50 #include <sys/ktr.h> 51 #include <sys/vnode.h> 52 #include <sys/mount.h> 53 #include <sys/bio.h> 54 #include <sys/buf.h> 55 #include <sys/conf.h> 56 #include <sys/fcntl.h> 57 #include <sys/ioccom.h> 58 #include <sys/malloc.h> 59 #include <sys/mutex.h> 60 #include <sys/rwlock.h> 61 #include <sys/sysctl.h> 62 #include <sys/vmmeter.h> 63 64 #include <security/mac/mac_framework.h> 65 66 #include <ufs/ufs/dir.h> 67 #include <ufs/ufs/extattr.h> 68 #include <ufs/ufs/gjournal.h> 69 #include <ufs/ufs/quota.h> 70 #include <ufs/ufs/ufsmount.h> 71 #include <ufs/ufs/inode.h> 72 #include <ufs/ufs/ufs_extern.h> 73 74 #include <ufs/ffs/fs.h> 75 #include <ufs/ffs/ffs_extern.h> 76 77 #include <vm/vm.h> 78 #include <vm/uma.h> 79 #include <vm/vm_page.h> 80 81 #include <geom/geom.h> 82 #include <geom/geom_vfs.h> 83 84 #include <ddb/ddb.h> 85 86 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2; 87 VFS_SMR_DECLARE; 88 89 static int ffs_mountfs(struct vnode *, struct mount *, struct thread *); 90 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, 91 ufs2_daddr_t); 92 static void ffs_ifree(struct ufsmount *ump, struct inode *ip); 93 static int ffs_sync_lazy(struct mount *mp); 94 static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size); 95 static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size); 96 97 static vfs_init_t ffs_init; 98 static vfs_uninit_t ffs_uninit; 99 static vfs_extattrctl_t ffs_extattrctl; 100 static vfs_cmount_t ffs_cmount; 101 static vfs_unmount_t ffs_unmount; 102 static vfs_mount_t ffs_mount; 103 static vfs_statfs_t ffs_statfs; 104 static vfs_fhtovp_t ffs_fhtovp; 105 static vfs_sync_t ffs_sync; 106 107 static struct vfsops ufs_vfsops = { 108 .vfs_extattrctl = ffs_extattrctl, 109 .vfs_fhtovp = ffs_fhtovp, 110 .vfs_init = ffs_init, 111 .vfs_mount = ffs_mount, 112 .vfs_cmount = ffs_cmount, 113 .vfs_quotactl = ufs_quotactl, 114 .vfs_root = vfs_cache_root, 115 .vfs_cachedroot = ufs_root, 116 .vfs_statfs = ffs_statfs, 117 .vfs_sync = ffs_sync, 118 .vfs_uninit = ffs_uninit, 119 .vfs_unmount = ffs_unmount, 120 .vfs_vget = ffs_vget, 121 .vfs_susp_clean = process_deferred_inactive, 122 }; 123 124 VFS_SET(ufs_vfsops, ufs, 0); 125 MODULE_VERSION(ufs, 1); 126 127 static b_strategy_t ffs_geom_strategy; 128 static b_write_t ffs_bufwrite; 129 130 static struct buf_ops ffs_ops = { 131 .bop_name = "FFS", 132 .bop_write = ffs_bufwrite, 133 .bop_strategy = ffs_geom_strategy, 134 .bop_sync = bufsync, 135 #ifdef NO_FFS_SNAPSHOT 136 .bop_bdflush = bufbdflush, 137 #else 138 .bop_bdflush = ffs_bdflush, 139 #endif 140 }; 141 142 /* 143 * Note that userquota and groupquota options are not currently used 144 * by UFS/FFS code and generally mount(8) does not pass those options 145 * from userland, but they can be passed by loader(8) via 146 * vfs.root.mountfrom.options. 147 */ 148 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr", 149 "noclusterw", "noexec", "export", "force", "from", "groupquota", 150 "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir", 151 "nosymfollow", "sync", "union", "userquota", "untrusted", NULL }; 152 153 static int ffs_enxio_enable = 1; 154 SYSCTL_DECL(_vfs_ffs); 155 SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN, 156 &ffs_enxio_enable, 0, 157 "enable mapping of other disk I/O errors to ENXIO"); 158 159 /* 160 * Return buffer with the contents of block "offset" from the beginning of 161 * directory "ip". If "res" is non-zero, fill it in with a pointer to the 162 * remaining space in the directory. 163 */ 164 static int 165 ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp) 166 { 167 struct inode *ip; 168 struct fs *fs; 169 struct buf *bp; 170 ufs_lbn_t lbn; 171 int bsize, error; 172 173 ip = VTOI(vp); 174 fs = ITOFS(ip); 175 lbn = lblkno(fs, offset); 176 bsize = blksize(fs, ip, lbn); 177 178 *bpp = NULL; 179 error = bread(vp, lbn, bsize, NOCRED, &bp); 180 if (error) { 181 return (error); 182 } 183 if (res) 184 *res = (char *)bp->b_data + blkoff(fs, offset); 185 *bpp = bp; 186 return (0); 187 } 188 189 /* 190 * Load up the contents of an inode and copy the appropriate pieces 191 * to the incore copy. 192 */ 193 static int 194 ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino) 195 { 196 struct ufs1_dinode *dip1; 197 struct ufs2_dinode *dip2; 198 int error; 199 200 if (I_IS_UFS1(ip)) { 201 dip1 = ip->i_din1; 202 *dip1 = 203 *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); 204 ip->i_mode = dip1->di_mode; 205 ip->i_nlink = dip1->di_nlink; 206 ip->i_effnlink = dip1->di_nlink; 207 ip->i_size = dip1->di_size; 208 ip->i_flags = dip1->di_flags; 209 ip->i_gen = dip1->di_gen; 210 ip->i_uid = dip1->di_uid; 211 ip->i_gid = dip1->di_gid; 212 return (0); 213 } 214 dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); 215 if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 && 216 !ffs_fsfail_cleanup(ITOUMP(ip), error)) { 217 printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt, 218 (intmax_t)ino); 219 return (error); 220 } 221 *ip->i_din2 = *dip2; 222 dip2 = ip->i_din2; 223 ip->i_mode = dip2->di_mode; 224 ip->i_nlink = dip2->di_nlink; 225 ip->i_effnlink = dip2->di_nlink; 226 ip->i_size = dip2->di_size; 227 ip->i_flags = dip2->di_flags; 228 ip->i_gen = dip2->di_gen; 229 ip->i_uid = dip2->di_uid; 230 ip->i_gid = dip2->di_gid; 231 return (0); 232 } 233 234 /* 235 * Verify that a filesystem block number is a valid data block. 236 * This routine is only called on untrusted filesystems. 237 */ 238 static int 239 ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize) 240 { 241 struct fs *fs; 242 struct ufsmount *ump; 243 ufs2_daddr_t end_daddr; 244 int cg, havemtx; 245 246 KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0, 247 ("ffs_check_blkno called on a trusted file system")); 248 ump = VFSTOUFS(mp); 249 fs = ump->um_fs; 250 cg = dtog(fs, daddr); 251 end_daddr = daddr + numfrags(fs, blksize); 252 /* 253 * Verify that the block number is a valid data block. Also check 254 * that it does not point to an inode block or a superblock. Accept 255 * blocks that are unalloacted (0) or part of snapshot metadata 256 * (BLK_NOCOPY or BLK_SNAP). 257 * 258 * Thus, the block must be in a valid range for the filesystem and 259 * either in the space before a backup superblock (except the first 260 * cylinder group where that space is used by the bootstrap code) or 261 * after the inode blocks and before the end of the cylinder group. 262 */ 263 if ((uint64_t)daddr <= BLK_SNAP || 264 ((uint64_t)end_daddr <= fs->fs_size && 265 ((cg > 0 && end_daddr <= cgsblock(fs, cg)) || 266 (daddr >= cgdmin(fs, cg) && 267 end_daddr <= cgbase(fs, cg) + fs->fs_fpg)))) 268 return (0); 269 if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0) 270 UFS_LOCK(ump); 271 if (ppsratecheck(&ump->um_last_integritymsg, 272 &ump->um_secs_integritymsg, 1)) { 273 UFS_UNLOCK(ump); 274 uprintf("\n%s: inode %jd, out-of-range indirect block " 275 "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr); 276 if (havemtx) 277 UFS_LOCK(ump); 278 } else if (!havemtx) 279 UFS_UNLOCK(ump); 280 return (EINTEGRITY); 281 } 282 283 /* 284 * On first ENXIO error, initiate an asynchronous forcible unmount. 285 * Used to unmount filesystems whose underlying media has gone away. 286 * 287 * Return true if a cleanup is in progress. 288 */ 289 int 290 ffs_fsfail_cleanup(struct ufsmount *ump, int error) 291 { 292 int retval; 293 294 UFS_LOCK(ump); 295 retval = ffs_fsfail_cleanup_locked(ump, error); 296 UFS_UNLOCK(ump); 297 return (retval); 298 } 299 300 int 301 ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error) 302 { 303 mtx_assert(UFS_MTX(ump), MA_OWNED); 304 if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) { 305 ump->um_flags |= UM_FSFAIL_CLEANUP; 306 /* 307 * Queue an async forced unmount. 308 */ 309 vfs_ref(ump->um_mountp); 310 dounmount(ump->um_mountp, 311 MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread); 312 printf("UFS: forcibly unmounting %s from %s\n", 313 ump->um_mountp->mnt_stat.f_mntfromname, 314 ump->um_mountp->mnt_stat.f_mntonname); 315 } 316 return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0); 317 } 318 319 /* 320 * Wrapper used during ENXIO cleanup to allocate empty buffers when 321 * the kernel is unable to read the real one. They are needed so that 322 * the soft updates code can use them to unwind its dependencies. 323 */ 324 int 325 ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno, 326 daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt, 327 struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *), 328 struct buf **bpp) 329 { 330 int error; 331 332 flags |= GB_CVTENXIO; 333 error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt, 334 cred, flags, ckhashfunc, bpp); 335 if (error != 0 && ffs_fsfail_cleanup(ump, error)) { 336 error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp); 337 KASSERT(error == 0, ("getblkx failed")); 338 vfs_bio_bzero_buf(*bpp, 0, size); 339 } 340 return (error); 341 } 342 343 static int 344 ffs_mount(struct mount *mp) 345 { 346 struct vnode *devvp, *odevvp; 347 struct thread *td; 348 struct ufsmount *ump = NULL; 349 struct fs *fs; 350 int error, flags; 351 int error1 __diagused; 352 uint64_t mntorflags, saved_mnt_flag; 353 accmode_t accmode; 354 struct nameidata ndp; 355 char *fspec; 356 bool mounted_softdep; 357 358 td = curthread; 359 if (vfs_filteropt(mp->mnt_optnew, ffs_opts)) 360 return (EINVAL); 361 if (uma_inode == NULL) { 362 uma_inode = uma_zcreate("FFS inode", 363 sizeof(struct inode), NULL, NULL, NULL, NULL, 364 UMA_ALIGN_PTR, 0); 365 uma_ufs1 = uma_zcreate("FFS1 dinode", 366 sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL, 367 UMA_ALIGN_PTR, 0); 368 uma_ufs2 = uma_zcreate("FFS2 dinode", 369 sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL, 370 UMA_ALIGN_PTR, 0); 371 VFS_SMR_ZONE_SET(uma_inode); 372 } 373 374 vfs_deleteopt(mp->mnt_optnew, "groupquota"); 375 vfs_deleteopt(mp->mnt_optnew, "userquota"); 376 377 fspec = vfs_getopts(mp->mnt_optnew, "from", &error); 378 if (error) 379 return (error); 380 381 mntorflags = 0; 382 if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0) 383 mntorflags |= MNT_UNTRUSTED; 384 385 if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0) 386 mntorflags |= MNT_ACLS; 387 388 if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) { 389 mntorflags |= MNT_SNAPSHOT; 390 /* 391 * Once we have set the MNT_SNAPSHOT flag, do not 392 * persist "snapshot" in the options list. 393 */ 394 vfs_deleteopt(mp->mnt_optnew, "snapshot"); 395 vfs_deleteopt(mp->mnt_opt, "snapshot"); 396 } 397 398 if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) { 399 if (mntorflags & MNT_ACLS) { 400 vfs_mount_error(mp, 401 "\"acls\" and \"nfsv4acls\" options " 402 "are mutually exclusive"); 403 return (EINVAL); 404 } 405 mntorflags |= MNT_NFS4ACLS; 406 } 407 408 MNT_ILOCK(mp); 409 mp->mnt_kern_flag &= ~MNTK_FPLOOKUP; 410 mp->mnt_flag |= mntorflags; 411 MNT_IUNLOCK(mp); 412 /* 413 * If updating, check whether changing from read-only to 414 * read/write; if there is no device name, that's all we do. 415 */ 416 if (mp->mnt_flag & MNT_UPDATE) { 417 ump = VFSTOUFS(mp); 418 fs = ump->um_fs; 419 odevvp = ump->um_odevvp; 420 devvp = ump->um_devvp; 421 if (fs->fs_ronly == 0 && 422 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 423 /* 424 * Flush any dirty data and suspend filesystem. 425 */ 426 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 427 return (error); 428 error = vfs_write_suspend_umnt(mp); 429 if (error != 0) 430 return (error); 431 432 fs->fs_ronly = 1; 433 if (MOUNTEDSOFTDEP(mp)) { 434 MNT_ILOCK(mp); 435 mp->mnt_flag &= ~MNT_SOFTDEP; 436 MNT_IUNLOCK(mp); 437 mounted_softdep = true; 438 } else 439 mounted_softdep = false; 440 441 /* 442 * Check for and optionally get rid of files open 443 * for writing. 444 */ 445 flags = WRITECLOSE; 446 if (mp->mnt_flag & MNT_FORCE) 447 flags |= FORCECLOSE; 448 if (mounted_softdep) { 449 error = softdep_flushfiles(mp, flags, td); 450 } else { 451 error = ffs_flushfiles(mp, flags, td); 452 } 453 if (error) { 454 fs->fs_ronly = 0; 455 if (mounted_softdep) { 456 MNT_ILOCK(mp); 457 mp->mnt_flag |= MNT_SOFTDEP; 458 MNT_IUNLOCK(mp); 459 } 460 vfs_write_resume(mp, 0); 461 return (error); 462 } 463 464 if (fs->fs_pendingblocks != 0 || 465 fs->fs_pendinginodes != 0) { 466 printf("WARNING: %s Update error: blocks %jd " 467 "files %d\n", fs->fs_fsmnt, 468 (intmax_t)fs->fs_pendingblocks, 469 fs->fs_pendinginodes); 470 fs->fs_pendingblocks = 0; 471 fs->fs_pendinginodes = 0; 472 } 473 if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) 474 fs->fs_clean = 1; 475 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 476 fs->fs_ronly = 0; 477 fs->fs_clean = 0; 478 if (mounted_softdep) { 479 MNT_ILOCK(mp); 480 mp->mnt_flag |= MNT_SOFTDEP; 481 MNT_IUNLOCK(mp); 482 } 483 vfs_write_resume(mp, 0); 484 return (error); 485 } 486 if (mounted_softdep) 487 softdep_unmount(mp); 488 g_topology_lock(); 489 /* 490 * Drop our write and exclusive access. 491 */ 492 g_access(ump->um_cp, 0, -1, -1); 493 g_topology_unlock(); 494 MNT_ILOCK(mp); 495 mp->mnt_flag |= MNT_RDONLY; 496 MNT_IUNLOCK(mp); 497 /* 498 * Allow the writers to note that filesystem 499 * is ro now. 500 */ 501 vfs_write_resume(mp, 0); 502 } 503 if ((mp->mnt_flag & MNT_RELOAD) && 504 (error = ffs_reload(mp, 0)) != 0) 505 return (error); 506 if (fs->fs_ronly && 507 !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 508 /* 509 * If upgrade to read-write by non-root, then verify 510 * that user has necessary permissions on the device. 511 */ 512 vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY); 513 error = VOP_ACCESS(odevvp, VREAD | VWRITE, 514 td->td_ucred, td); 515 if (error) 516 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 517 VOP_UNLOCK(odevvp); 518 if (error) { 519 return (error); 520 } 521 fs->fs_flags &= ~FS_UNCLEAN; 522 if (fs->fs_clean == 0) { 523 fs->fs_flags |= FS_UNCLEAN; 524 if ((mp->mnt_flag & MNT_FORCE) || 525 ((fs->fs_flags & 526 (FS_SUJ | FS_NEEDSFSCK)) == 0 && 527 (fs->fs_flags & FS_DOSOFTDEP))) { 528 printf("WARNING: %s was not properly " 529 "dismounted\n", fs->fs_fsmnt); 530 } else { 531 vfs_mount_error(mp, 532 "R/W mount of %s denied. %s.%s", 533 fs->fs_fsmnt, 534 "Filesystem is not clean - run fsck", 535 (fs->fs_flags & FS_SUJ) == 0 ? "" : 536 " Forced mount will invalidate" 537 " journal contents"); 538 return (EPERM); 539 } 540 } 541 g_topology_lock(); 542 /* 543 * Request exclusive write access. 544 */ 545 error = g_access(ump->um_cp, 0, 1, 1); 546 g_topology_unlock(); 547 if (error) 548 return (error); 549 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 550 return (error); 551 error = vfs_write_suspend_umnt(mp); 552 if (error != 0) 553 return (error); 554 fs->fs_ronly = 0; 555 MNT_ILOCK(mp); 556 saved_mnt_flag = MNT_RDONLY; 557 if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag & 558 MNT_ASYNC) != 0) 559 saved_mnt_flag |= MNT_ASYNC; 560 mp->mnt_flag &= ~saved_mnt_flag; 561 MNT_IUNLOCK(mp); 562 fs->fs_mtime = time_second; 563 /* check to see if we need to start softdep */ 564 if ((fs->fs_flags & FS_DOSOFTDEP) && 565 (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ 566 fs->fs_ronly = 1; 567 MNT_ILOCK(mp); 568 mp->mnt_flag |= saved_mnt_flag; 569 MNT_IUNLOCK(mp); 570 vfs_write_resume(mp, 0); 571 return (error); 572 } 573 fs->fs_clean = 0; 574 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 575 fs->fs_ronly = 1; 576 if ((fs->fs_flags & FS_DOSOFTDEP) != 0) 577 softdep_unmount(mp); 578 MNT_ILOCK(mp); 579 mp->mnt_flag |= saved_mnt_flag; 580 MNT_IUNLOCK(mp); 581 vfs_write_resume(mp, 0); 582 return (error); 583 } 584 if (fs->fs_snapinum[0] != 0) 585 ffs_snapshot_mount(mp); 586 vfs_write_resume(mp, 0); 587 } 588 /* 589 * Soft updates is incompatible with "async", 590 * so if we are doing softupdates stop the user 591 * from setting the async flag in an update. 592 * Softdep_mount() clears it in an initial mount 593 * or ro->rw remount. 594 */ 595 if (MOUNTEDSOFTDEP(mp)) { 596 /* XXX: Reset too late ? */ 597 MNT_ILOCK(mp); 598 mp->mnt_flag &= ~MNT_ASYNC; 599 MNT_IUNLOCK(mp); 600 } 601 /* 602 * Keep MNT_ACLS flag if it is stored in superblock. 603 */ 604 if ((fs->fs_flags & FS_ACLS) != 0) { 605 /* XXX: Set too late ? */ 606 MNT_ILOCK(mp); 607 mp->mnt_flag |= MNT_ACLS; 608 MNT_IUNLOCK(mp); 609 } 610 611 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 612 /* XXX: Set too late ? */ 613 MNT_ILOCK(mp); 614 mp->mnt_flag |= MNT_NFS4ACLS; 615 MNT_IUNLOCK(mp); 616 } 617 618 /* 619 * If this is a snapshot request, take the snapshot. 620 */ 621 if (mp->mnt_flag & MNT_SNAPSHOT) 622 return (ffs_snapshot(mp, fspec)); 623 624 /* 625 * Must not call namei() while owning busy ref. 626 */ 627 vfs_unbusy(mp); 628 } 629 630 /* 631 * Not an update, or updating the name: look up the name 632 * and verify that it refers to a sensible disk device. 633 */ 634 NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec); 635 error = namei(&ndp); 636 if ((mp->mnt_flag & MNT_UPDATE) != 0) { 637 /* 638 * Unmount does not start if MNT_UPDATE is set. Mount 639 * update busies mp before setting MNT_UPDATE. We 640 * must be able to retain our busy ref succesfully, 641 * without sleep. 642 */ 643 error1 = vfs_busy(mp, MBF_NOWAIT); 644 MPASS(error1 == 0); 645 } 646 if (error != 0) 647 return (error); 648 NDFREE(&ndp, NDF_ONLY_PNBUF); 649 devvp = ndp.ni_vp; 650 if (!vn_isdisk_error(devvp, &error)) { 651 vput(devvp); 652 return (error); 653 } 654 655 /* 656 * If mount by non-root, then verify that user has necessary 657 * permissions on the device. 658 */ 659 accmode = VREAD; 660 if ((mp->mnt_flag & MNT_RDONLY) == 0) 661 accmode |= VWRITE; 662 error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); 663 if (error) 664 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 665 if (error) { 666 vput(devvp); 667 return (error); 668 } 669 670 if (mp->mnt_flag & MNT_UPDATE) { 671 /* 672 * Update only 673 * 674 * If it's not the same vnode, or at least the same device 675 * then it's not correct. 676 */ 677 678 if (devvp->v_rdev != ump->um_devvp->v_rdev) 679 error = EINVAL; /* needs translation */ 680 vput(devvp); 681 if (error) 682 return (error); 683 } else { 684 /* 685 * New mount 686 * 687 * We need the name for the mount point (also used for 688 * "last mounted on") copied in. If an error occurs, 689 * the mount point is discarded by the upper level code. 690 * Note that vfs_mount_alloc() populates f_mntonname for us. 691 */ 692 if ((error = ffs_mountfs(devvp, mp, td)) != 0) { 693 vrele(devvp); 694 return (error); 695 } 696 } 697 698 MNT_ILOCK(mp); 699 /* 700 * This is racy versus lookup, see ufs_fplookup_vexec for details. 701 */ 702 if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0) 703 panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp); 704 if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0) 705 mp->mnt_kern_flag |= MNTK_FPLOOKUP; 706 MNT_IUNLOCK(mp); 707 708 vfs_mountedfrom(mp, fspec); 709 return (0); 710 } 711 712 /* 713 * Compatibility with old mount system call. 714 */ 715 716 static int 717 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) 718 { 719 struct ufs_args args; 720 int error; 721 722 if (data == NULL) 723 return (EINVAL); 724 error = copyin(data, &args, sizeof args); 725 if (error) 726 return (error); 727 728 ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); 729 ma = mount_arg(ma, "export", &args.export, sizeof(args.export)); 730 error = kernel_mount(ma, flags); 731 732 return (error); 733 } 734 735 /* 736 * Reload all incore data for a filesystem (used after running fsck on 737 * the root filesystem and finding things to fix). If the 'force' flag 738 * is 0, the filesystem must be mounted read-only. 739 * 740 * Things to do to update the mount: 741 * 1) invalidate all cached meta-data. 742 * 2) re-read superblock from disk. 743 * 3) re-read summary information from disk. 744 * 4) invalidate all inactive vnodes. 745 * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary 746 * writers, if requested. 747 * 6) invalidate all cached file data. 748 * 7) re-read inode data for all active vnodes. 749 */ 750 int 751 ffs_reload(struct mount *mp, int flags) 752 { 753 struct vnode *vp, *mvp, *devvp; 754 struct inode *ip; 755 void *space; 756 struct buf *bp; 757 struct fs *fs, *newfs; 758 struct ufsmount *ump; 759 ufs2_daddr_t sblockloc; 760 int i, blks, error; 761 u_long size; 762 int32_t *lp; 763 764 ump = VFSTOUFS(mp); 765 766 MNT_ILOCK(mp); 767 if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) { 768 MNT_IUNLOCK(mp); 769 return (EINVAL); 770 } 771 MNT_IUNLOCK(mp); 772 773 /* 774 * Step 1: invalidate all cached meta-data. 775 */ 776 devvp = VFSTOUFS(mp)->um_devvp; 777 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 778 if (vinvalbuf(devvp, 0, 0, 0) != 0) 779 panic("ffs_reload: dirty1"); 780 VOP_UNLOCK(devvp); 781 782 /* 783 * Step 2: re-read superblock from disk. 784 */ 785 fs = VFSTOUFS(mp)->um_fs; 786 if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, 787 NOCRED, &bp)) != 0) 788 return (error); 789 newfs = (struct fs *)bp->b_data; 790 if ((newfs->fs_magic != FS_UFS1_MAGIC && 791 newfs->fs_magic != FS_UFS2_MAGIC) || 792 newfs->fs_bsize > MAXBSIZE || 793 newfs->fs_bsize < sizeof(struct fs)) { 794 brelse(bp); 795 return (EIO); /* XXX needs translation */ 796 } 797 /* 798 * Preserve the summary information, read-only status, and 799 * superblock location by copying these fields into our new 800 * superblock before using it to update the existing superblock. 801 */ 802 newfs->fs_si = fs->fs_si; 803 newfs->fs_ronly = fs->fs_ronly; 804 sblockloc = fs->fs_sblockloc; 805 bcopy(newfs, fs, (u_int)fs->fs_sbsize); 806 brelse(bp); 807 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 808 ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); 809 UFS_LOCK(ump); 810 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 811 printf("WARNING: %s: reload pending error: blocks %jd " 812 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 813 fs->fs_pendinginodes); 814 fs->fs_pendingblocks = 0; 815 fs->fs_pendinginodes = 0; 816 } 817 UFS_UNLOCK(ump); 818 819 /* 820 * Step 3: re-read summary information from disk. 821 */ 822 size = fs->fs_cssize; 823 blks = howmany(size, fs->fs_fsize); 824 if (fs->fs_contigsumsize > 0) 825 size += fs->fs_ncg * sizeof(int32_t); 826 size += fs->fs_ncg * sizeof(u_int8_t); 827 free(fs->fs_csp, M_UFSMNT); 828 space = malloc(size, M_UFSMNT, M_WAITOK); 829 fs->fs_csp = space; 830 for (i = 0; i < blks; i += fs->fs_frag) { 831 size = fs->fs_bsize; 832 if (i + fs->fs_frag > blks) 833 size = (blks - i) * fs->fs_fsize; 834 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 835 NOCRED, &bp); 836 if (error) 837 return (error); 838 bcopy(bp->b_data, space, (u_int)size); 839 space = (char *)space + size; 840 brelse(bp); 841 } 842 /* 843 * We no longer know anything about clusters per cylinder group. 844 */ 845 if (fs->fs_contigsumsize > 0) { 846 fs->fs_maxcluster = lp = space; 847 for (i = 0; i < fs->fs_ncg; i++) 848 *lp++ = fs->fs_contigsumsize; 849 space = lp; 850 } 851 size = fs->fs_ncg * sizeof(u_int8_t); 852 fs->fs_contigdirs = (u_int8_t *)space; 853 bzero(fs->fs_contigdirs, size); 854 if ((flags & FFSR_UNSUSPEND) != 0) { 855 MNT_ILOCK(mp); 856 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); 857 wakeup(&mp->mnt_flag); 858 MNT_IUNLOCK(mp); 859 } 860 861 loop: 862 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 863 /* 864 * Skip syncer vnode. 865 */ 866 if (vp->v_type == VNON) { 867 VI_UNLOCK(vp); 868 continue; 869 } 870 /* 871 * Step 4: invalidate all cached file data. 872 */ 873 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) { 874 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 875 goto loop; 876 } 877 if (vinvalbuf(vp, 0, 0, 0)) 878 panic("ffs_reload: dirty2"); 879 /* 880 * Step 5: re-read inode data for all active vnodes. 881 */ 882 ip = VTOI(vp); 883 error = 884 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 885 (int)fs->fs_bsize, NOCRED, &bp); 886 if (error) { 887 vput(vp); 888 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 889 return (error); 890 } 891 if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) { 892 brelse(bp); 893 vput(vp); 894 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 895 return (error); 896 } 897 ip->i_effnlink = ip->i_nlink; 898 brelse(bp); 899 vput(vp); 900 } 901 return (0); 902 } 903 904 /* 905 * Common code for mount and mountroot 906 */ 907 static int 908 ffs_mountfs(odevvp, mp, td) 909 struct vnode *odevvp; 910 struct mount *mp; 911 struct thread *td; 912 { 913 struct ufsmount *ump; 914 struct fs *fs; 915 struct cdev *dev; 916 int error, i, len, ronly; 917 struct ucred *cred; 918 struct g_consumer *cp; 919 struct mount *nmp; 920 struct vnode *devvp; 921 int candelete, canspeedup; 922 off_t loc; 923 924 fs = NULL; 925 ump = NULL; 926 cred = td ? td->td_ucred : NOCRED; 927 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 928 929 devvp = mntfs_allocvp(mp, odevvp); 930 VOP_UNLOCK(odevvp); 931 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 932 KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); 933 dev = devvp->v_rdev; 934 KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data")); 935 if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, 936 (uintptr_t)mp) == 0) { 937 mntfs_freevp(devvp); 938 return (EBUSY); 939 } 940 g_topology_lock(); 941 error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1); 942 g_topology_unlock(); 943 if (error != 0) { 944 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 945 mntfs_freevp(devvp); 946 return (error); 947 } 948 dev_ref(dev); 949 devvp->v_bufobj.bo_ops = &ffs_ops; 950 BO_LOCK(&odevvp->v_bufobj); 951 odevvp->v_bufobj.bo_flag |= BO_NOBUFS; 952 BO_UNLOCK(&odevvp->v_bufobj); 953 VOP_UNLOCK(devvp); 954 if (dev->si_iosize_max != 0) 955 mp->mnt_iosize_max = dev->si_iosize_max; 956 if (mp->mnt_iosize_max > maxphys) 957 mp->mnt_iosize_max = maxphys; 958 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { 959 error = EINVAL; 960 vfs_mount_error(mp, 961 "Invalid sectorsize %d for superblock size %d", 962 cp->provider->sectorsize, SBLOCKSIZE); 963 goto out; 964 } 965 /* fetch the superblock and summary information */ 966 loc = STDSB; 967 if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0) 968 loc = STDSB_NOHASHFAIL; 969 if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0) 970 goto out; 971 fs->fs_flags &= ~FS_UNCLEAN; 972 if (fs->fs_clean == 0) { 973 fs->fs_flags |= FS_UNCLEAN; 974 if (ronly || (mp->mnt_flag & MNT_FORCE) || 975 ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && 976 (fs->fs_flags & FS_DOSOFTDEP))) { 977 printf("WARNING: %s was not properly dismounted\n", 978 fs->fs_fsmnt); 979 } else { 980 vfs_mount_error(mp, "R/W mount of %s denied. %s%s", 981 fs->fs_fsmnt, "Filesystem is not clean - run fsck.", 982 (fs->fs_flags & FS_SUJ) == 0 ? "" : 983 " Forced mount will invalidate journal contents"); 984 error = EPERM; 985 goto out; 986 } 987 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 988 (mp->mnt_flag & MNT_FORCE)) { 989 printf("WARNING: %s: lost blocks %jd files %d\n", 990 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 991 fs->fs_pendinginodes); 992 fs->fs_pendingblocks = 0; 993 fs->fs_pendinginodes = 0; 994 } 995 } 996 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 997 printf("WARNING: %s: mount pending error: blocks %jd " 998 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 999 fs->fs_pendinginodes); 1000 fs->fs_pendingblocks = 0; 1001 fs->fs_pendinginodes = 0; 1002 } 1003 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 1004 #ifdef UFS_GJOURNAL 1005 /* 1006 * Get journal provider name. 1007 */ 1008 len = 1024; 1009 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 1010 if (g_io_getattr("GJOURNAL::provider", cp, &len, 1011 mp->mnt_gjprovider) == 0) { 1012 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 1013 M_UFSMNT, M_WAITOK); 1014 MNT_ILOCK(mp); 1015 mp->mnt_flag |= MNT_GJOURNAL; 1016 MNT_IUNLOCK(mp); 1017 } else { 1018 if ((mp->mnt_flag & MNT_RDONLY) == 0) 1019 printf("WARNING: %s: GJOURNAL flag on fs " 1020 "but no gjournal provider below\n", 1021 mp->mnt_stat.f_mntonname); 1022 free(mp->mnt_gjprovider, M_UFSMNT); 1023 mp->mnt_gjprovider = NULL; 1024 } 1025 #else 1026 printf("WARNING: %s: GJOURNAL flag on fs but no " 1027 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 1028 #endif 1029 } else { 1030 mp->mnt_gjprovider = NULL; 1031 } 1032 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 1033 ump->um_cp = cp; 1034 ump->um_bo = &devvp->v_bufobj; 1035 ump->um_fs = fs; 1036 if (fs->fs_magic == FS_UFS1_MAGIC) { 1037 ump->um_fstype = UFS1; 1038 ump->um_balloc = ffs_balloc_ufs1; 1039 } else { 1040 ump->um_fstype = UFS2; 1041 ump->um_balloc = ffs_balloc_ufs2; 1042 } 1043 ump->um_blkatoff = ffs_blkatoff; 1044 ump->um_truncate = ffs_truncate; 1045 ump->um_update = ffs_update; 1046 ump->um_valloc = ffs_valloc; 1047 ump->um_vfree = ffs_vfree; 1048 ump->um_ifree = ffs_ifree; 1049 ump->um_rdonly = ffs_rdonly; 1050 ump->um_snapgone = ffs_snapgone; 1051 if ((mp->mnt_flag & MNT_UNTRUSTED) != 0) 1052 ump->um_check_blkno = ffs_check_blkno; 1053 else 1054 ump->um_check_blkno = NULL; 1055 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 1056 sx_init(&ump->um_checkpath_lock, "uchpth"); 1057 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); 1058 fs->fs_ronly = ronly; 1059 fs->fs_active = NULL; 1060 mp->mnt_data = ump; 1061 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 1062 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 1063 nmp = NULL; 1064 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 1065 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 1066 if (nmp) 1067 vfs_rel(nmp); 1068 vfs_getnewfsid(mp); 1069 } 1070 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 1071 MNT_ILOCK(mp); 1072 mp->mnt_flag |= MNT_LOCAL; 1073 MNT_IUNLOCK(mp); 1074 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 1075 #ifdef MAC 1076 MNT_ILOCK(mp); 1077 mp->mnt_flag |= MNT_MULTILABEL; 1078 MNT_IUNLOCK(mp); 1079 #else 1080 printf("WARNING: %s: multilabel flag on fs but " 1081 "no MAC support\n", mp->mnt_stat.f_mntonname); 1082 #endif 1083 } 1084 if ((fs->fs_flags & FS_ACLS) != 0) { 1085 #ifdef UFS_ACL 1086 MNT_ILOCK(mp); 1087 1088 if (mp->mnt_flag & MNT_NFS4ACLS) 1089 printf("WARNING: %s: ACLs flag on fs conflicts with " 1090 "\"nfsv4acls\" mount option; option ignored\n", 1091 mp->mnt_stat.f_mntonname); 1092 mp->mnt_flag &= ~MNT_NFS4ACLS; 1093 mp->mnt_flag |= MNT_ACLS; 1094 1095 MNT_IUNLOCK(mp); 1096 #else 1097 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 1098 mp->mnt_stat.f_mntonname); 1099 #endif 1100 } 1101 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 1102 #ifdef UFS_ACL 1103 MNT_ILOCK(mp); 1104 1105 if (mp->mnt_flag & MNT_ACLS) 1106 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 1107 "with \"acls\" mount option; option ignored\n", 1108 mp->mnt_stat.f_mntonname); 1109 mp->mnt_flag &= ~MNT_ACLS; 1110 mp->mnt_flag |= MNT_NFS4ACLS; 1111 1112 MNT_IUNLOCK(mp); 1113 #else 1114 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 1115 "ACLs support\n", mp->mnt_stat.f_mntonname); 1116 #endif 1117 } 1118 if ((fs->fs_flags & FS_TRIM) != 0) { 1119 len = sizeof(int); 1120 if (g_io_getattr("GEOM::candelete", cp, &len, 1121 &candelete) == 0) { 1122 if (candelete) 1123 ump->um_flags |= UM_CANDELETE; 1124 else 1125 printf("WARNING: %s: TRIM flag on fs but disk " 1126 "does not support TRIM\n", 1127 mp->mnt_stat.f_mntonname); 1128 } else { 1129 printf("WARNING: %s: TRIM flag on fs but disk does " 1130 "not confirm that it supports TRIM\n", 1131 mp->mnt_stat.f_mntonname); 1132 } 1133 if (((ump->um_flags) & UM_CANDELETE) != 0) { 1134 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 1135 taskqueue_thread_enqueue, &ump->um_trim_tq); 1136 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 1137 "%s trim", mp->mnt_stat.f_mntonname); 1138 ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM, 1139 &ump->um_trimlisthashsize); 1140 } 1141 } 1142 1143 len = sizeof(int); 1144 if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) { 1145 if (canspeedup) 1146 ump->um_flags |= UM_CANSPEEDUP; 1147 } 1148 1149 ump->um_mountp = mp; 1150 ump->um_dev = dev; 1151 ump->um_devvp = devvp; 1152 ump->um_odevvp = odevvp; 1153 ump->um_nindir = fs->fs_nindir; 1154 ump->um_bptrtodb = fs->fs_fsbtodb; 1155 ump->um_seqinc = fs->fs_frag; 1156 for (i = 0; i < MAXQUOTAS; i++) 1157 ump->um_quotas[i] = NULLVP; 1158 #ifdef UFS_EXTATTR 1159 ufs_extattr_uepm_init(&ump->um_extattr); 1160 #endif 1161 /* 1162 * Set FS local "last mounted on" information (NULL pad) 1163 */ 1164 bzero(fs->fs_fsmnt, MAXMNTLEN); 1165 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1166 mp->mnt_stat.f_iosize = fs->fs_bsize; 1167 1168 if (mp->mnt_flag & MNT_ROOTFS) { 1169 /* 1170 * Root mount; update timestamp in mount structure. 1171 * this will be used by the common root mount code 1172 * to update the system clock. 1173 */ 1174 mp->mnt_time = fs->fs_time; 1175 } 1176 1177 if (ronly == 0) { 1178 fs->fs_mtime = time_second; 1179 if ((fs->fs_flags & FS_DOSOFTDEP) && 1180 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1181 ffs_flushfiles(mp, FORCECLOSE, td); 1182 goto out; 1183 } 1184 if (fs->fs_snapinum[0] != 0) 1185 ffs_snapshot_mount(mp); 1186 fs->fs_fmod = 1; 1187 fs->fs_clean = 0; 1188 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1189 } 1190 /* 1191 * Initialize filesystem state information in mount struct. 1192 */ 1193 MNT_ILOCK(mp); 1194 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1195 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1196 MNT_IUNLOCK(mp); 1197 #ifdef UFS_EXTATTR 1198 #ifdef UFS_EXTATTR_AUTOSTART 1199 /* 1200 * 1201 * Auto-starting does the following: 1202 * - check for /.attribute in the fs, and extattr_start if so 1203 * - for each file in .attribute, enable that file with 1204 * an attribute of the same name. 1205 * Not clear how to report errors -- probably eat them. 1206 * This would all happen while the filesystem was busy/not 1207 * available, so would effectively be "atomic". 1208 */ 1209 (void) ufs_extattr_autostart(mp, td); 1210 #endif /* !UFS_EXTATTR_AUTOSTART */ 1211 #endif /* !UFS_EXTATTR */ 1212 return (0); 1213 out: 1214 if (fs != NULL) { 1215 free(fs->fs_csp, M_UFSMNT); 1216 free(fs->fs_si, M_UFSMNT); 1217 free(fs, M_UFSMNT); 1218 } 1219 if (cp != NULL) { 1220 g_topology_lock(); 1221 g_vfs_close(cp); 1222 g_topology_unlock(); 1223 } 1224 if (ump != NULL) { 1225 mtx_destroy(UFS_MTX(ump)); 1226 sx_destroy(&ump->um_checkpath_lock); 1227 if (mp->mnt_gjprovider != NULL) { 1228 free(mp->mnt_gjprovider, M_UFSMNT); 1229 mp->mnt_gjprovider = NULL; 1230 } 1231 MPASS(ump->um_softdep == NULL); 1232 free(ump, M_UFSMNT); 1233 mp->mnt_data = NULL; 1234 } 1235 BO_LOCK(&odevvp->v_bufobj); 1236 odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1237 BO_UNLOCK(&odevvp->v_bufobj); 1238 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 1239 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1240 mntfs_freevp(devvp); 1241 dev_rel(dev); 1242 return (error); 1243 } 1244 1245 /* 1246 * A read function for use by filesystem-layer routines. 1247 */ 1248 static int 1249 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size) 1250 { 1251 struct buf *bp; 1252 int error; 1253 1254 KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp)); 1255 *bufp = malloc(size, M_UFSMNT, M_WAITOK); 1256 if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED, 1257 &bp)) != 0) 1258 return (error); 1259 bcopy(bp->b_data, *bufp, size); 1260 bp->b_flags |= B_INVAL | B_NOCACHE; 1261 brelse(bp); 1262 return (0); 1263 } 1264 1265 static int bigcgs = 0; 1266 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); 1267 1268 /* 1269 * Sanity checks for loading old filesystem superblocks. 1270 * See ffs_oldfscompat_write below for unwound actions. 1271 * 1272 * XXX - Parts get retired eventually. 1273 * Unfortunately new bits get added. 1274 */ 1275 static void 1276 ffs_oldfscompat_read(fs, ump, sblockloc) 1277 struct fs *fs; 1278 struct ufsmount *ump; 1279 ufs2_daddr_t sblockloc; 1280 { 1281 off_t maxfilesize; 1282 1283 /* 1284 * If not yet done, update fs_flags location and value of fs_sblockloc. 1285 */ 1286 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1287 fs->fs_flags = fs->fs_old_flags; 1288 fs->fs_old_flags |= FS_FLAGS_UPDATED; 1289 fs->fs_sblockloc = sblockloc; 1290 } 1291 /* 1292 * If not yet done, update UFS1 superblock with new wider fields. 1293 */ 1294 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { 1295 fs->fs_maxbsize = fs->fs_bsize; 1296 fs->fs_time = fs->fs_old_time; 1297 fs->fs_size = fs->fs_old_size; 1298 fs->fs_dsize = fs->fs_old_dsize; 1299 fs->fs_csaddr = fs->fs_old_csaddr; 1300 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; 1301 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; 1302 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; 1303 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; 1304 } 1305 if (fs->fs_magic == FS_UFS1_MAGIC && 1306 fs->fs_old_inodefmt < FS_44INODEFMT) { 1307 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; 1308 fs->fs_qbmask = ~fs->fs_bmask; 1309 fs->fs_qfmask = ~fs->fs_fmask; 1310 } 1311 if (fs->fs_magic == FS_UFS1_MAGIC) { 1312 ump->um_savedmaxfilesize = fs->fs_maxfilesize; 1313 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; 1314 if (fs->fs_maxfilesize > maxfilesize) 1315 fs->fs_maxfilesize = maxfilesize; 1316 } 1317 /* Compatibility for old filesystems */ 1318 if (fs->fs_avgfilesize <= 0) 1319 fs->fs_avgfilesize = AVFILESIZ; 1320 if (fs->fs_avgfpdir <= 0) 1321 fs->fs_avgfpdir = AFPDIR; 1322 if (bigcgs) { 1323 fs->fs_save_cgsize = fs->fs_cgsize; 1324 fs->fs_cgsize = fs->fs_bsize; 1325 } 1326 } 1327 1328 /* 1329 * Unwinding superblock updates for old filesystems. 1330 * See ffs_oldfscompat_read above for details. 1331 * 1332 * XXX - Parts get retired eventually. 1333 * Unfortunately new bits get added. 1334 */ 1335 void 1336 ffs_oldfscompat_write(fs, ump) 1337 struct fs *fs; 1338 struct ufsmount *ump; 1339 { 1340 1341 /* 1342 * Copy back UFS2 updated fields that UFS1 inspects. 1343 */ 1344 if (fs->fs_magic == FS_UFS1_MAGIC) { 1345 fs->fs_old_time = fs->fs_time; 1346 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; 1347 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; 1348 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; 1349 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; 1350 fs->fs_maxfilesize = ump->um_savedmaxfilesize; 1351 } 1352 if (bigcgs) { 1353 fs->fs_cgsize = fs->fs_save_cgsize; 1354 fs->fs_save_cgsize = 0; 1355 } 1356 } 1357 1358 /* 1359 * unmount system call 1360 */ 1361 static int 1362 ffs_unmount(mp, mntflags) 1363 struct mount *mp; 1364 int mntflags; 1365 { 1366 struct thread *td; 1367 struct ufsmount *ump = VFSTOUFS(mp); 1368 struct fs *fs; 1369 int error, flags, susp; 1370 #ifdef UFS_EXTATTR 1371 int e_restart; 1372 #endif 1373 1374 flags = 0; 1375 td = curthread; 1376 fs = ump->um_fs; 1377 if (mntflags & MNT_FORCE) 1378 flags |= FORCECLOSE; 1379 susp = fs->fs_ronly == 0; 1380 #ifdef UFS_EXTATTR 1381 if ((error = ufs_extattr_stop(mp, td))) { 1382 if (error != EOPNOTSUPP) 1383 printf("WARNING: unmount %s: ufs_extattr_stop " 1384 "returned errno %d\n", mp->mnt_stat.f_mntonname, 1385 error); 1386 e_restart = 0; 1387 } else { 1388 ufs_extattr_uepm_destroy(&ump->um_extattr); 1389 e_restart = 1; 1390 } 1391 #endif 1392 if (susp) { 1393 error = vfs_write_suspend_umnt(mp); 1394 if (error != 0) 1395 goto fail1; 1396 } 1397 if (MOUNTEDSOFTDEP(mp)) 1398 error = softdep_flushfiles(mp, flags, td); 1399 else 1400 error = ffs_flushfiles(mp, flags, td); 1401 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) 1402 goto fail; 1403 1404 UFS_LOCK(ump); 1405 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1406 printf("WARNING: unmount %s: pending error: blocks %jd " 1407 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1408 fs->fs_pendinginodes); 1409 fs->fs_pendingblocks = 0; 1410 fs->fs_pendinginodes = 0; 1411 } 1412 UFS_UNLOCK(ump); 1413 if (MOUNTEDSOFTDEP(mp)) 1414 softdep_unmount(mp); 1415 MPASS(ump->um_softdep == NULL); 1416 if (fs->fs_ronly == 0) { 1417 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; 1418 error = ffs_sbupdate(ump, MNT_WAIT, 0); 1419 if (ffs_fsfail_cleanup(ump, error)) 1420 error = 0; 1421 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) { 1422 fs->fs_clean = 0; 1423 goto fail; 1424 } 1425 } 1426 if (susp) 1427 vfs_write_resume(mp, VR_START_WRITE); 1428 if (ump->um_trim_tq != NULL) { 1429 while (ump->um_trim_inflight != 0) 1430 pause("ufsutr", hz); 1431 taskqueue_drain_all(ump->um_trim_tq); 1432 taskqueue_free(ump->um_trim_tq); 1433 free (ump->um_trimhash, M_TRIM); 1434 } 1435 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1436 g_topology_lock(); 1437 g_vfs_close(ump->um_cp); 1438 g_topology_unlock(); 1439 BO_LOCK(&ump->um_odevvp->v_bufobj); 1440 ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1441 BO_UNLOCK(&ump->um_odevvp->v_bufobj); 1442 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1443 mntfs_freevp(ump->um_devvp); 1444 vrele(ump->um_odevvp); 1445 dev_rel(ump->um_dev); 1446 mtx_destroy(UFS_MTX(ump)); 1447 sx_destroy(&ump->um_checkpath_lock); 1448 if (mp->mnt_gjprovider != NULL) { 1449 free(mp->mnt_gjprovider, M_UFSMNT); 1450 mp->mnt_gjprovider = NULL; 1451 } 1452 free(fs->fs_csp, M_UFSMNT); 1453 free(fs->fs_si, M_UFSMNT); 1454 free(fs, M_UFSMNT); 1455 free(ump, M_UFSMNT); 1456 mp->mnt_data = NULL; 1457 MNT_ILOCK(mp); 1458 mp->mnt_flag &= ~MNT_LOCAL; 1459 MNT_IUNLOCK(mp); 1460 if (td->td_su == mp) { 1461 td->td_su = NULL; 1462 vfs_rel(mp); 1463 } 1464 return (error); 1465 1466 fail: 1467 if (susp) 1468 vfs_write_resume(mp, VR_START_WRITE); 1469 fail1: 1470 #ifdef UFS_EXTATTR 1471 if (e_restart) { 1472 ufs_extattr_uepm_init(&ump->um_extattr); 1473 #ifdef UFS_EXTATTR_AUTOSTART 1474 (void) ufs_extattr_autostart(mp, td); 1475 #endif 1476 } 1477 #endif 1478 1479 return (error); 1480 } 1481 1482 /* 1483 * Flush out all the files in a filesystem. 1484 */ 1485 int 1486 ffs_flushfiles(mp, flags, td) 1487 struct mount *mp; 1488 int flags; 1489 struct thread *td; 1490 { 1491 struct ufsmount *ump; 1492 int qerror, error; 1493 1494 ump = VFSTOUFS(mp); 1495 qerror = 0; 1496 #ifdef QUOTA 1497 if (mp->mnt_flag & MNT_QUOTA) { 1498 int i; 1499 error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1500 if (error) 1501 return (error); 1502 for (i = 0; i < MAXQUOTAS; i++) { 1503 error = quotaoff(td, mp, i); 1504 if (error != 0) { 1505 if ((flags & EARLYFLUSH) == 0) 1506 return (error); 1507 else 1508 qerror = error; 1509 } 1510 } 1511 1512 /* 1513 * Here we fall through to vflush again to ensure that 1514 * we have gotten rid of all the system vnodes, unless 1515 * quotas must not be closed. 1516 */ 1517 } 1518 #endif 1519 /* devvp is not locked there */ 1520 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1521 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1522 return (error); 1523 ffs_snapshot_unmount(mp); 1524 flags |= FORCECLOSE; 1525 /* 1526 * Here we fall through to vflush again to ensure 1527 * that we have gotten rid of all the system vnodes. 1528 */ 1529 } 1530 1531 /* 1532 * Do not close system files if quotas were not closed, to be 1533 * able to sync the remaining dquots. The freeblks softupdate 1534 * workitems might hold a reference on a dquot, preventing 1535 * quotaoff() from completing. Next round of 1536 * softdep_flushworklist() iteration should process the 1537 * blockers, allowing the next run of quotaoff() to finally 1538 * flush held dquots. 1539 * 1540 * Otherwise, flush all the files. 1541 */ 1542 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1543 return (error); 1544 1545 /* 1546 * Flush filesystem metadata. 1547 */ 1548 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1549 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1550 VOP_UNLOCK(ump->um_devvp); 1551 return (error); 1552 } 1553 1554 /* 1555 * Get filesystem statistics. 1556 */ 1557 static int 1558 ffs_statfs(mp, sbp) 1559 struct mount *mp; 1560 struct statfs *sbp; 1561 { 1562 struct ufsmount *ump; 1563 struct fs *fs; 1564 1565 ump = VFSTOUFS(mp); 1566 fs = ump->um_fs; 1567 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1568 panic("ffs_statfs"); 1569 sbp->f_version = STATFS_VERSION; 1570 sbp->f_bsize = fs->fs_fsize; 1571 sbp->f_iosize = fs->fs_bsize; 1572 sbp->f_blocks = fs->fs_dsize; 1573 UFS_LOCK(ump); 1574 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 1575 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 1576 sbp->f_bavail = freespace(fs, fs->fs_minfree) + 1577 dbtofsb(fs, fs->fs_pendingblocks); 1578 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; 1579 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 1580 UFS_UNLOCK(ump); 1581 sbp->f_namemax = UFS_MAXNAMLEN; 1582 return (0); 1583 } 1584 1585 static bool 1586 sync_doupdate(struct inode *ip) 1587 { 1588 1589 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 1590 IN_UPDATE)) != 0); 1591 } 1592 1593 static int 1594 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused) 1595 { 1596 struct inode *ip; 1597 1598 /* 1599 * Flags are safe to access because ->v_data invalidation 1600 * is held off by listmtx. 1601 */ 1602 if (vp->v_type == VNON) 1603 return (false); 1604 ip = VTOI(vp); 1605 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) 1606 return (false); 1607 return (true); 1608 } 1609 1610 /* 1611 * For a lazy sync, we only care about access times, quotas and the 1612 * superblock. Other filesystem changes are already converted to 1613 * cylinder group blocks or inode blocks updates and are written to 1614 * disk by syncer. 1615 */ 1616 static int 1617 ffs_sync_lazy(mp) 1618 struct mount *mp; 1619 { 1620 struct vnode *mvp, *vp; 1621 struct inode *ip; 1622 int allerror, error; 1623 1624 allerror = 0; 1625 if ((mp->mnt_flag & MNT_NOATIME) != 0) { 1626 #ifdef QUOTA 1627 qsync(mp); 1628 #endif 1629 goto sbupdate; 1630 } 1631 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) { 1632 if (vp->v_type == VNON) { 1633 VI_UNLOCK(vp); 1634 continue; 1635 } 1636 ip = VTOI(vp); 1637 1638 /* 1639 * The IN_ACCESS flag is converted to IN_MODIFIED by 1640 * ufs_close() and ufs_getattr() by the calls to 1641 * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1642 * Test also all the other timestamp flags too, to pick up 1643 * any other cases that could be missed. 1644 */ 1645 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1646 VI_UNLOCK(vp); 1647 continue; 1648 } 1649 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0) 1650 continue; 1651 #ifdef QUOTA 1652 qsyncvp(vp); 1653 #endif 1654 if (sync_doupdate(ip)) 1655 error = ffs_update(vp, 0); 1656 if (error != 0) 1657 allerror = error; 1658 vput(vp); 1659 } 1660 sbupdate: 1661 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1662 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1663 allerror = error; 1664 return (allerror); 1665 } 1666 1667 /* 1668 * Go through the disk queues to initiate sandbagged IO; 1669 * go through the inodes to write those that have been modified; 1670 * initiate the writing of the super block if it has been modified. 1671 * 1672 * Note: we are always called with the filesystem marked busy using 1673 * vfs_busy(). 1674 */ 1675 static int 1676 ffs_sync(mp, waitfor) 1677 struct mount *mp; 1678 int waitfor; 1679 { 1680 struct vnode *mvp, *vp, *devvp; 1681 struct thread *td; 1682 struct inode *ip; 1683 struct ufsmount *ump = VFSTOUFS(mp); 1684 struct fs *fs; 1685 int error, count, lockreq, allerror = 0; 1686 int suspend; 1687 int suspended; 1688 int secondary_writes; 1689 int secondary_accwrites; 1690 int softdep_deps; 1691 int softdep_accdeps; 1692 struct bufobj *bo; 1693 1694 suspend = 0; 1695 suspended = 0; 1696 td = curthread; 1697 fs = ump->um_fs; 1698 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) 1699 panic("%s: ffs_sync: modification on read-only filesystem", 1700 fs->fs_fsmnt); 1701 if (waitfor == MNT_LAZY) { 1702 if (!rebooting) 1703 return (ffs_sync_lazy(mp)); 1704 waitfor = MNT_NOWAIT; 1705 } 1706 1707 /* 1708 * Write back each (modified) inode. 1709 */ 1710 lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1711 if (waitfor == MNT_SUSPEND) { 1712 suspend = 1; 1713 waitfor = MNT_WAIT; 1714 } 1715 if (waitfor == MNT_WAIT) 1716 lockreq = LK_EXCLUSIVE; 1717 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; 1718 loop: 1719 /* Grab snapshot of secondary write counts */ 1720 MNT_ILOCK(mp); 1721 secondary_writes = mp->mnt_secondary_writes; 1722 secondary_accwrites = mp->mnt_secondary_accwrites; 1723 MNT_IUNLOCK(mp); 1724 1725 /* Grab snapshot of softdep dependency counts */ 1726 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1727 1728 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1729 /* 1730 * Depend on the vnode interlock to keep things stable enough 1731 * for a quick test. Since there might be hundreds of 1732 * thousands of vnodes, we cannot afford even a subroutine 1733 * call unless there's a good chance that we have work to do. 1734 */ 1735 if (vp->v_type == VNON) { 1736 VI_UNLOCK(vp); 1737 continue; 1738 } 1739 ip = VTOI(vp); 1740 if ((ip->i_flag & 1741 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 1742 vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1743 VI_UNLOCK(vp); 1744 continue; 1745 } 1746 if ((error = vget(vp, lockreq)) != 0) { 1747 if (error == ENOENT || error == ENOLCK) { 1748 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1749 goto loop; 1750 } 1751 continue; 1752 } 1753 #ifdef QUOTA 1754 qsyncvp(vp); 1755 #endif 1756 for (;;) { 1757 error = ffs_syncvnode(vp, waitfor, 0); 1758 if (error == ERELOOKUP) 1759 continue; 1760 if (error != 0) 1761 allerror = error; 1762 break; 1763 } 1764 vput(vp); 1765 } 1766 /* 1767 * Force stale filesystem control information to be flushed. 1768 */ 1769 if (waitfor == MNT_WAIT || rebooting) { 1770 if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 1771 allerror = error; 1772 if (ffs_fsfail_cleanup(ump, allerror)) 1773 allerror = 0; 1774 /* Flushed work items may create new vnodes to clean */ 1775 if (allerror == 0 && count) 1776 goto loop; 1777 } 1778 1779 devvp = ump->um_devvp; 1780 bo = &devvp->v_bufobj; 1781 BO_LOCK(bo); 1782 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1783 BO_UNLOCK(bo); 1784 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1785 error = VOP_FSYNC(devvp, waitfor, td); 1786 VOP_UNLOCK(devvp); 1787 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 1788 error = ffs_sbupdate(ump, waitfor, 0); 1789 if (error != 0) 1790 allerror = error; 1791 if (ffs_fsfail_cleanup(ump, allerror)) 1792 allerror = 0; 1793 if (allerror == 0 && waitfor == MNT_WAIT) 1794 goto loop; 1795 } else if (suspend != 0) { 1796 if (softdep_check_suspend(mp, 1797 devvp, 1798 softdep_deps, 1799 softdep_accdeps, 1800 secondary_writes, 1801 secondary_accwrites) != 0) { 1802 MNT_IUNLOCK(mp); 1803 goto loop; /* More work needed */ 1804 } 1805 mtx_assert(MNT_MTX(mp), MA_OWNED); 1806 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1807 MNT_IUNLOCK(mp); 1808 suspended = 1; 1809 } else 1810 BO_UNLOCK(bo); 1811 /* 1812 * Write back modified superblock. 1813 */ 1814 if (fs->fs_fmod != 0 && 1815 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1816 allerror = error; 1817 if (ffs_fsfail_cleanup(ump, allerror)) 1818 allerror = 0; 1819 return (allerror); 1820 } 1821 1822 int 1823 ffs_vget(mp, ino, flags, vpp) 1824 struct mount *mp; 1825 ino_t ino; 1826 int flags; 1827 struct vnode **vpp; 1828 { 1829 return (ffs_vgetf(mp, ino, flags, vpp, 0)); 1830 } 1831 1832 int 1833 ffs_vgetf(mp, ino, flags, vpp, ffs_flags) 1834 struct mount *mp; 1835 ino_t ino; 1836 int flags; 1837 struct vnode **vpp; 1838 int ffs_flags; 1839 { 1840 struct fs *fs; 1841 struct inode *ip; 1842 struct ufsmount *ump; 1843 struct buf *bp; 1844 struct vnode *vp; 1845 daddr_t dbn; 1846 int error; 1847 1848 MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 || 1849 (flags & LK_EXCLUSIVE) != 0); 1850 1851 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 1852 if (error != 0) 1853 return (error); 1854 if (*vpp != NULL) { 1855 if ((ffs_flags & FFSV_REPLACE) == 0 || 1856 ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 || 1857 !VN_IS_DOOMED(*vpp))) 1858 return (0); 1859 vgone(*vpp); 1860 vput(*vpp); 1861 } 1862 1863 /* 1864 * We must promote to an exclusive lock for vnode creation. This 1865 * can happen if lookup is passed LOCKSHARED. 1866 */ 1867 if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1868 flags &= ~LK_TYPE_MASK; 1869 flags |= LK_EXCLUSIVE; 1870 } 1871 1872 /* 1873 * We do not lock vnode creation as it is believed to be too 1874 * expensive for such rare case as simultaneous creation of vnode 1875 * for same ino by different processes. We just allow them to race 1876 * and check later to decide who wins. Let the race begin! 1877 */ 1878 1879 ump = VFSTOUFS(mp); 1880 fs = ump->um_fs; 1881 ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO); 1882 1883 /* Allocate a new vnode/inode. */ 1884 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 1885 &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1886 if (error) { 1887 *vpp = NULL; 1888 uma_zfree_smr(uma_inode, ip); 1889 return (error); 1890 } 1891 /* 1892 * FFS supports recursive locking. 1893 */ 1894 lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); 1895 VN_LOCK_AREC(vp); 1896 vp->v_data = ip; 1897 vp->v_bufobj.bo_bsize = fs->fs_bsize; 1898 ip->i_vnode = vp; 1899 ip->i_ump = ump; 1900 ip->i_number = ino; 1901 ip->i_ea_refs = 0; 1902 ip->i_nextclustercg = -1; 1903 ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; 1904 ip->i_mode = 0; /* ensure error cases below throw away vnode */ 1905 cluster_init_vn(&ip->i_clusterw); 1906 #ifdef DIAGNOSTIC 1907 ufs_init_trackers(ip); 1908 #endif 1909 #ifdef QUOTA 1910 { 1911 int i; 1912 for (i = 0; i < MAXQUOTAS; i++) 1913 ip->i_dquot[i] = NODQUOT; 1914 } 1915 #endif 1916 1917 if (ffs_flags & FFSV_FORCEINSMQ) 1918 vp->v_vflag |= VV_FORCEINSMQ; 1919 error = insmntque(vp, mp); 1920 if (error != 0) { 1921 uma_zfree_smr(uma_inode, ip); 1922 *vpp = NULL; 1923 return (error); 1924 } 1925 vp->v_vflag &= ~VV_FORCEINSMQ; 1926 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 1927 if (error != 0) 1928 return (error); 1929 if (*vpp != NULL) { 1930 /* 1931 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set) 1932 * operate on empty inode, which must not be found by 1933 * other threads until fully filled. Vnode for empty 1934 * inode must be not re-inserted on the hash by other 1935 * thread, after removal by us at the beginning. 1936 */ 1937 MPASS((ffs_flags & FFSV_REPLACE) == 0); 1938 return (0); 1939 } 1940 1941 /* Read in the disk contents for the inode, copy into the inode. */ 1942 dbn = fsbtodb(fs, ino_to_fsba(fs, ino)); 1943 error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize, 1944 NULL, NULL, 0, NOCRED, 0, NULL, &bp); 1945 if (error != 0) { 1946 /* 1947 * The inode does not contain anything useful, so it would 1948 * be misleading to leave it on its hash chain. With mode 1949 * still zero, it will be unlinked and returned to the free 1950 * list by vput(). 1951 */ 1952 vgone(vp); 1953 vput(vp); 1954 *vpp = NULL; 1955 return (error); 1956 } 1957 if (I_IS_UFS1(ip)) 1958 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 1959 else 1960 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1961 if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) { 1962 bqrelse(bp); 1963 vgone(vp); 1964 vput(vp); 1965 *vpp = NULL; 1966 return (error); 1967 } 1968 if (DOINGSOFTDEP(vp) && (!fs->fs_ronly || 1969 (ffs_flags & FFSV_FORCEINODEDEP) != 0)) 1970 softdep_load_inodeblock(ip); 1971 else 1972 ip->i_effnlink = ip->i_nlink; 1973 bqrelse(bp); 1974 1975 /* 1976 * Initialize the vnode from the inode, check for aliases. 1977 * Note that the underlying vnode may have changed. 1978 */ 1979 error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, 1980 &vp); 1981 if (error) { 1982 vgone(vp); 1983 vput(vp); 1984 *vpp = NULL; 1985 return (error); 1986 } 1987 1988 /* 1989 * Finish inode initialization. 1990 */ 1991 if (vp->v_type != VFIFO) { 1992 /* FFS supports shared locking for all files except fifos. */ 1993 VN_LOCK_ASHARE(vp); 1994 } 1995 1996 /* 1997 * Set up a generation number for this inode if it does not 1998 * already have one. This should only happen on old filesystems. 1999 */ 2000 if (ip->i_gen == 0) { 2001 while (ip->i_gen == 0) 2002 ip->i_gen = arc4random(); 2003 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 2004 UFS_INODE_SET_FLAG(ip, IN_MODIFIED); 2005 DIP_SET(ip, i_gen, ip->i_gen); 2006 } 2007 } 2008 #ifdef MAC 2009 if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) { 2010 /* 2011 * If this vnode is already allocated, and we're running 2012 * multi-label, attempt to perform a label association 2013 * from the extended attributes on the inode. 2014 */ 2015 error = mac_vnode_associate_extattr(mp, vp); 2016 if (error) { 2017 /* ufs_inactive will release ip->i_devvp ref. */ 2018 vgone(vp); 2019 vput(vp); 2020 *vpp = NULL; 2021 return (error); 2022 } 2023 } 2024 #endif 2025 2026 *vpp = vp; 2027 return (0); 2028 } 2029 2030 /* 2031 * File handle to vnode 2032 * 2033 * Have to be really careful about stale file handles: 2034 * - check that the inode number is valid 2035 * - for UFS2 check that the inode number is initialized 2036 * - call ffs_vget() to get the locked inode 2037 * - check for an unallocated inode (i_mode == 0) 2038 * - check that the given client host has export rights and return 2039 * those rights via. exflagsp and credanonp 2040 */ 2041 static int 2042 ffs_fhtovp(mp, fhp, flags, vpp) 2043 struct mount *mp; 2044 struct fid *fhp; 2045 int flags; 2046 struct vnode **vpp; 2047 { 2048 struct ufid *ufhp; 2049 2050 ufhp = (struct ufid *)fhp; 2051 return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags, 2052 vpp, 0)); 2053 } 2054 2055 int 2056 ffs_inotovp(mp, ino, gen, lflags, vpp, ffs_flags) 2057 struct mount *mp; 2058 ino_t ino; 2059 u_int64_t gen; 2060 int lflags; 2061 struct vnode **vpp; 2062 int ffs_flags; 2063 { 2064 struct ufsmount *ump; 2065 struct vnode *nvp; 2066 struct inode *ip; 2067 struct fs *fs; 2068 struct cg *cgp; 2069 struct buf *bp; 2070 u_int cg; 2071 int error; 2072 2073 ump = VFSTOUFS(mp); 2074 fs = ump->um_fs; 2075 *vpp = NULL; 2076 2077 if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) 2078 return (ESTALE); 2079 2080 /* 2081 * Need to check if inode is initialized because UFS2 does lazy 2082 * initialization and nfs_fhtovp can offer arbitrary inode numbers. 2083 */ 2084 if (fs->fs_magic == FS_UFS2_MAGIC) { 2085 cg = ino_to_cg(fs, ino); 2086 error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp); 2087 if (error != 0) 2088 return (error); 2089 if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) { 2090 brelse(bp); 2091 return (ESTALE); 2092 } 2093 brelse(bp); 2094 } 2095 2096 error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags); 2097 if (error != 0) 2098 return (error); 2099 2100 ip = VTOI(nvp); 2101 if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) { 2102 if (ip->i_mode == 0) 2103 vgone(nvp); 2104 vput(nvp); 2105 return (ESTALE); 2106 } 2107 2108 vnode_create_vobject(nvp, DIP(ip, i_size), curthread); 2109 *vpp = nvp; 2110 return (0); 2111 } 2112 2113 /* 2114 * Initialize the filesystem. 2115 */ 2116 static int 2117 ffs_init(vfsp) 2118 struct vfsconf *vfsp; 2119 { 2120 2121 ffs_susp_initialize(); 2122 softdep_initialize(); 2123 return (ufs_init(vfsp)); 2124 } 2125 2126 /* 2127 * Undo the work of ffs_init(). 2128 */ 2129 static int 2130 ffs_uninit(vfsp) 2131 struct vfsconf *vfsp; 2132 { 2133 int ret; 2134 2135 ret = ufs_uninit(vfsp); 2136 softdep_uninitialize(); 2137 ffs_susp_uninitialize(); 2138 taskqueue_drain_all(taskqueue_thread); 2139 return (ret); 2140 } 2141 2142 /* 2143 * Structure used to pass information from ffs_sbupdate to its 2144 * helper routine ffs_use_bwrite. 2145 */ 2146 struct devfd { 2147 struct ufsmount *ump; 2148 struct buf *sbbp; 2149 int waitfor; 2150 int suspended; 2151 int error; 2152 }; 2153 2154 /* 2155 * Write a superblock and associated information back to disk. 2156 */ 2157 int 2158 ffs_sbupdate(ump, waitfor, suspended) 2159 struct ufsmount *ump; 2160 int waitfor; 2161 int suspended; 2162 { 2163 struct fs *fs; 2164 struct buf *sbbp; 2165 struct devfd devfd; 2166 2167 fs = ump->um_fs; 2168 if (fs->fs_ronly == 1 && 2169 (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) != 2170 (MNT_RDONLY | MNT_UPDATE)) 2171 panic("ffs_sbupdate: write read-only filesystem"); 2172 /* 2173 * We use the superblock's buf to serialize calls to ffs_sbupdate(). 2174 */ 2175 sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 2176 (int)fs->fs_sbsize, 0, 0, 0); 2177 /* 2178 * Initialize info needed for write function. 2179 */ 2180 devfd.ump = ump; 2181 devfd.sbbp = sbbp; 2182 devfd.waitfor = waitfor; 2183 devfd.suspended = suspended; 2184 devfd.error = 0; 2185 return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite)); 2186 } 2187 2188 /* 2189 * Write function for use by filesystem-layer routines. 2190 */ 2191 static int 2192 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size) 2193 { 2194 struct devfd *devfdp; 2195 struct ufsmount *ump; 2196 struct buf *bp; 2197 struct fs *fs; 2198 int error; 2199 2200 devfdp = devfd; 2201 ump = devfdp->ump; 2202 fs = ump->um_fs; 2203 /* 2204 * Writing the superblock summary information. 2205 */ 2206 if (loc != fs->fs_sblockloc) { 2207 bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0); 2208 bcopy(buf, bp->b_data, (u_int)size); 2209 if (devfdp->suspended) 2210 bp->b_flags |= B_VALIDSUSPWRT; 2211 if (devfdp->waitfor != MNT_WAIT) 2212 bawrite(bp); 2213 else if ((error = bwrite(bp)) != 0) 2214 devfdp->error = error; 2215 return (0); 2216 } 2217 /* 2218 * Writing the superblock itself. We need to do special checks for it. 2219 */ 2220 bp = devfdp->sbbp; 2221 if (ffs_fsfail_cleanup(ump, devfdp->error)) 2222 devfdp->error = 0; 2223 if (devfdp->error != 0) { 2224 brelse(bp); 2225 return (devfdp->error); 2226 } 2227 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 && 2228 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 2229 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 2230 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1); 2231 fs->fs_sblockloc = SBLOCK_UFS1; 2232 } 2233 if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 && 2234 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 2235 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 2236 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2); 2237 fs->fs_sblockloc = SBLOCK_UFS2; 2238 } 2239 if (MOUNTEDSOFTDEP(ump->um_mountp)) 2240 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp); 2241 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 2242 fs = (struct fs *)bp->b_data; 2243 ffs_oldfscompat_write(fs, ump); 2244 fs->fs_si = NULL; 2245 /* Recalculate the superblock hash */ 2246 fs->fs_ckhash = ffs_calc_sbhash(fs); 2247 if (devfdp->suspended) 2248 bp->b_flags |= B_VALIDSUSPWRT; 2249 if (devfdp->waitfor != MNT_WAIT) 2250 bawrite(bp); 2251 else if ((error = bwrite(bp)) != 0) 2252 devfdp->error = error; 2253 return (devfdp->error); 2254 } 2255 2256 static int 2257 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, 2258 int attrnamespace, const char *attrname) 2259 { 2260 2261 #ifdef UFS_EXTATTR 2262 return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace, 2263 attrname)); 2264 #else 2265 return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, 2266 attrname)); 2267 #endif 2268 } 2269 2270 static void 2271 ffs_ifree(struct ufsmount *ump, struct inode *ip) 2272 { 2273 2274 if (ump->um_fstype == UFS1 && ip->i_din1 != NULL) 2275 uma_zfree(uma_ufs1, ip->i_din1); 2276 else if (ip->i_din2 != NULL) 2277 uma_zfree(uma_ufs2, ip->i_din2); 2278 uma_zfree_smr(uma_inode, ip); 2279 } 2280 2281 static int dobkgrdwrite = 1; 2282 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0, 2283 "Do background writes (honoring the BV_BKGRDWRITE flag)?"); 2284 2285 /* 2286 * Complete a background write started from bwrite. 2287 */ 2288 static void 2289 ffs_backgroundwritedone(struct buf *bp) 2290 { 2291 struct bufobj *bufobj; 2292 struct buf *origbp; 2293 2294 #ifdef SOFTUPDATES 2295 if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0) 2296 softdep_handle_error(bp); 2297 #endif 2298 2299 /* 2300 * Find the original buffer that we are writing. 2301 */ 2302 bufobj = bp->b_bufobj; 2303 BO_LOCK(bufobj); 2304 if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL) 2305 panic("backgroundwritedone: lost buffer"); 2306 2307 /* 2308 * We should mark the cylinder group buffer origbp as 2309 * dirty, to not lose the failed write. 2310 */ 2311 if ((bp->b_ioflags & BIO_ERROR) != 0) 2312 origbp->b_vflags |= BV_BKGRDERR; 2313 BO_UNLOCK(bufobj); 2314 /* 2315 * Process dependencies then return any unfinished ones. 2316 */ 2317 if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0) 2318 buf_complete(bp); 2319 #ifdef SOFTUPDATES 2320 if (!LIST_EMPTY(&bp->b_dep)) 2321 softdep_move_dependencies(bp, origbp); 2322 #endif 2323 /* 2324 * This buffer is marked B_NOCACHE so when it is released 2325 * by biodone it will be tossed. Clear B_IOSTARTED in case of error. 2326 */ 2327 bp->b_flags |= B_NOCACHE; 2328 bp->b_flags &= ~(B_CACHE | B_IOSTARTED); 2329 pbrelvp(bp); 2330 2331 /* 2332 * Prevent brelse() from trying to keep and re-dirtying bp on 2333 * errors. It causes b_bufobj dereference in 2334 * bdirty()/reassignbuf(), and b_bufobj was cleared in 2335 * pbrelvp() above. 2336 */ 2337 if ((bp->b_ioflags & BIO_ERROR) != 0) 2338 bp->b_flags |= B_INVAL; 2339 bufdone(bp); 2340 BO_LOCK(bufobj); 2341 /* 2342 * Clear the BV_BKGRDINPROG flag in the original buffer 2343 * and awaken it if it is waiting for the write to complete. 2344 * If BV_BKGRDINPROG is not set in the original buffer it must 2345 * have been released and re-instantiated - which is not legal. 2346 */ 2347 KASSERT((origbp->b_vflags & BV_BKGRDINPROG), 2348 ("backgroundwritedone: lost buffer2")); 2349 origbp->b_vflags &= ~BV_BKGRDINPROG; 2350 if (origbp->b_vflags & BV_BKGRDWAIT) { 2351 origbp->b_vflags &= ~BV_BKGRDWAIT; 2352 wakeup(&origbp->b_xflags); 2353 } 2354 BO_UNLOCK(bufobj); 2355 } 2356 2357 /* 2358 * Write, release buffer on completion. (Done by iodone 2359 * if async). Do not bother writing anything if the buffer 2360 * is invalid. 2361 * 2362 * Note that we set B_CACHE here, indicating that buffer is 2363 * fully valid and thus cacheable. This is true even of NFS 2364 * now so we set it generally. This could be set either here 2365 * or in biodone() since the I/O is synchronous. We put it 2366 * here. 2367 */ 2368 static int 2369 ffs_bufwrite(struct buf *bp) 2370 { 2371 struct buf *newbp; 2372 struct cg *cgp; 2373 2374 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2375 if (bp->b_flags & B_INVAL) { 2376 brelse(bp); 2377 return (0); 2378 } 2379 2380 if (!BUF_ISLOCKED(bp)) 2381 panic("bufwrite: buffer is not busy???"); 2382 /* 2383 * If a background write is already in progress, delay 2384 * writing this block if it is asynchronous. Otherwise 2385 * wait for the background write to complete. 2386 */ 2387 BO_LOCK(bp->b_bufobj); 2388 if (bp->b_vflags & BV_BKGRDINPROG) { 2389 if (bp->b_flags & B_ASYNC) { 2390 BO_UNLOCK(bp->b_bufobj); 2391 bdwrite(bp); 2392 return (0); 2393 } 2394 bp->b_vflags |= BV_BKGRDWAIT; 2395 msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO, 2396 "bwrbg", 0); 2397 if (bp->b_vflags & BV_BKGRDINPROG) 2398 panic("bufwrite: still writing"); 2399 } 2400 bp->b_vflags &= ~BV_BKGRDERR; 2401 BO_UNLOCK(bp->b_bufobj); 2402 2403 /* 2404 * If this buffer is marked for background writing and we 2405 * do not have to wait for it, make a copy and write the 2406 * copy so as to leave this buffer ready for further use. 2407 * 2408 * This optimization eats a lot of memory. If we have a page 2409 * or buffer shortfall we can't do it. 2410 */ 2411 if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && 2412 (bp->b_flags & B_ASYNC) && 2413 !vm_page_count_severe() && 2414 !buf_dirty_count_severe()) { 2415 KASSERT(bp->b_iodone == NULL, 2416 ("bufwrite: needs chained iodone (%p)", bp->b_iodone)); 2417 2418 /* get a new block */ 2419 newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD); 2420 if (newbp == NULL) 2421 goto normal_write; 2422 2423 KASSERT(buf_mapped(bp), ("Unmapped cg")); 2424 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize); 2425 BO_LOCK(bp->b_bufobj); 2426 bp->b_vflags |= BV_BKGRDINPROG; 2427 BO_UNLOCK(bp->b_bufobj); 2428 newbp->b_xflags |= 2429 (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER; 2430 newbp->b_lblkno = bp->b_lblkno; 2431 newbp->b_blkno = bp->b_blkno; 2432 newbp->b_offset = bp->b_offset; 2433 newbp->b_iodone = ffs_backgroundwritedone; 2434 newbp->b_flags |= B_ASYNC; 2435 newbp->b_flags &= ~B_INVAL; 2436 pbgetvp(bp->b_vp, newbp); 2437 2438 #ifdef SOFTUPDATES 2439 /* 2440 * Move over the dependencies. If there are rollbacks, 2441 * leave the parent buffer dirtied as it will need to 2442 * be written again. 2443 */ 2444 if (LIST_EMPTY(&bp->b_dep) || 2445 softdep_move_dependencies(bp, newbp) == 0) 2446 bundirty(bp); 2447 #else 2448 bundirty(bp); 2449 #endif 2450 2451 /* 2452 * Initiate write on the copy, release the original. The 2453 * BKGRDINPROG flag prevents it from going away until 2454 * the background write completes. We have to recalculate 2455 * its check hash in case the buffer gets freed and then 2456 * reconstituted from the buffer cache during a later read. 2457 */ 2458 if ((bp->b_xflags & BX_CYLGRP) != 0) { 2459 cgp = (struct cg *)bp->b_data; 2460 cgp->cg_ckhash = 0; 2461 cgp->cg_ckhash = 2462 calculate_crc32c(~0L, bp->b_data, bp->b_bcount); 2463 } 2464 bqrelse(bp); 2465 bp = newbp; 2466 } else 2467 /* Mark the buffer clean */ 2468 bundirty(bp); 2469 2470 /* Let the normal bufwrite do the rest for us */ 2471 normal_write: 2472 /* 2473 * If we are writing a cylinder group, update its time. 2474 */ 2475 if ((bp->b_xflags & BX_CYLGRP) != 0) { 2476 cgp = (struct cg *)bp->b_data; 2477 cgp->cg_old_time = cgp->cg_time = time_second; 2478 } 2479 return (bufwrite(bp)); 2480 } 2481 2482 static void 2483 ffs_geom_strategy(struct bufobj *bo, struct buf *bp) 2484 { 2485 struct vnode *vp; 2486 struct buf *tbp; 2487 int error, nocopy; 2488 2489 /* 2490 * This is the bufobj strategy for the private VCHR vnodes 2491 * used by FFS to access the underlying storage device. 2492 * We override the default bufobj strategy and thus bypass 2493 * VOP_STRATEGY() for these vnodes. 2494 */ 2495 vp = bo2vnode(bo); 2496 KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR || 2497 bp->b_vp->v_rdev == NULL || 2498 bp->b_vp->v_rdev->si_mountpt == NULL || 2499 VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL || 2500 vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp, 2501 ("ffs_geom_strategy() with wrong vp")); 2502 if (bp->b_iocmd == BIO_WRITE) { 2503 if ((bp->b_flags & B_VALIDSUSPWRT) == 0 && 2504 bp->b_vp != NULL && bp->b_vp->v_mount != NULL && 2505 (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0) 2506 panic("ffs_geom_strategy: bad I/O"); 2507 nocopy = bp->b_flags & B_NOCOPY; 2508 bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY); 2509 if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 && 2510 vp->v_rdev->si_snapdata != NULL) { 2511 if ((bp->b_flags & B_CLUSTER) != 0) { 2512 runningbufwakeup(bp); 2513 TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, 2514 b_cluster.cluster_entry) { 2515 error = ffs_copyonwrite(vp, tbp); 2516 if (error != 0 && 2517 error != EOPNOTSUPP) { 2518 bp->b_error = error; 2519 bp->b_ioflags |= BIO_ERROR; 2520 bp->b_flags &= ~B_BARRIER; 2521 bufdone(bp); 2522 return; 2523 } 2524 } 2525 bp->b_runningbufspace = bp->b_bufsize; 2526 atomic_add_long(&runningbufspace, 2527 bp->b_runningbufspace); 2528 } else { 2529 error = ffs_copyonwrite(vp, bp); 2530 if (error != 0 && error != EOPNOTSUPP) { 2531 bp->b_error = error; 2532 bp->b_ioflags |= BIO_ERROR; 2533 bp->b_flags &= ~B_BARRIER; 2534 bufdone(bp); 2535 return; 2536 } 2537 } 2538 } 2539 #ifdef SOFTUPDATES 2540 if ((bp->b_flags & B_CLUSTER) != 0) { 2541 TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, 2542 b_cluster.cluster_entry) { 2543 if (!LIST_EMPTY(&tbp->b_dep)) 2544 buf_start(tbp); 2545 } 2546 } else { 2547 if (!LIST_EMPTY(&bp->b_dep)) 2548 buf_start(bp); 2549 } 2550 2551 #endif 2552 /* 2553 * Check for metadata that needs check-hashes and update them. 2554 */ 2555 switch (bp->b_xflags & BX_FSPRIV) { 2556 case BX_CYLGRP: 2557 ((struct cg *)bp->b_data)->cg_ckhash = 0; 2558 ((struct cg *)bp->b_data)->cg_ckhash = 2559 calculate_crc32c(~0L, bp->b_data, bp->b_bcount); 2560 break; 2561 2562 case BX_SUPERBLOCK: 2563 case BX_INODE: 2564 case BX_INDIR: 2565 case BX_DIR: 2566 printf("Check-hash write is unimplemented!!!\n"); 2567 break; 2568 2569 case 0: 2570 break; 2571 2572 default: 2573 printf("multiple buffer types 0x%b\n", 2574 (u_int)(bp->b_xflags & BX_FSPRIV), 2575 PRINT_UFS_BUF_XFLAGS); 2576 break; 2577 } 2578 } 2579 if (bp->b_iocmd != BIO_READ && ffs_enxio_enable) 2580 bp->b_xflags |= BX_CVTENXIO; 2581 g_vfs_strategy(bo, bp); 2582 } 2583 2584 int 2585 ffs_own_mount(const struct mount *mp) 2586 { 2587 2588 if (mp->mnt_op == &ufs_vfsops) 2589 return (1); 2590 return (0); 2591 } 2592 2593 #ifdef DDB 2594 #ifdef SOFTUPDATES 2595 2596 /* defined in ffs_softdep.c */ 2597 extern void db_print_ffs(struct ufsmount *ump); 2598 2599 DB_SHOW_COMMAND(ffs, db_show_ffs) 2600 { 2601 struct mount *mp; 2602 struct ufsmount *ump; 2603 2604 if (have_addr) { 2605 ump = VFSTOUFS((struct mount *)addr); 2606 db_print_ffs(ump); 2607 return; 2608 } 2609 2610 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 2611 if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name)) 2612 db_print_ffs(VFSTOUFS(mp)); 2613 } 2614 } 2615 2616 #endif /* SOFTUPDATES */ 2617 #endif /* DDB */ 2618