1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1991, 1993, 1994 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_quota.h" 38 #include "opt_ufs.h" 39 #include "opt_ffs.h" 40 #include "opt_ddb.h" 41 42 #include <sys/param.h> 43 #include <sys/gsb_crc32.h> 44 #include <sys/systm.h> 45 #include <sys/namei.h> 46 #include <sys/priv.h> 47 #include <sys/proc.h> 48 #include <sys/taskqueue.h> 49 #include <sys/kernel.h> 50 #include <sys/ktr.h> 51 #include <sys/vnode.h> 52 #include <sys/mount.h> 53 #include <sys/bio.h> 54 #include <sys/buf.h> 55 #include <sys/conf.h> 56 #include <sys/fcntl.h> 57 #include <sys/ioccom.h> 58 #include <sys/malloc.h> 59 #include <sys/mutex.h> 60 #include <sys/rwlock.h> 61 #include <sys/sysctl.h> 62 #include <sys/vmmeter.h> 63 64 #include <security/mac/mac_framework.h> 65 66 #include <ufs/ufs/dir.h> 67 #include <ufs/ufs/extattr.h> 68 #include <ufs/ufs/gjournal.h> 69 #include <ufs/ufs/quota.h> 70 #include <ufs/ufs/ufsmount.h> 71 #include <ufs/ufs/inode.h> 72 #include <ufs/ufs/ufs_extern.h> 73 74 #include <ufs/ffs/fs.h> 75 #include <ufs/ffs/ffs_extern.h> 76 77 #include <vm/vm.h> 78 #include <vm/uma.h> 79 #include <vm/vm_page.h> 80 81 #include <geom/geom.h> 82 #include <geom/geom_vfs.h> 83 84 #include <ddb/ddb.h> 85 86 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2; 87 VFS_SMR_DECLARE; 88 89 static int ffs_mountfs(struct vnode *, struct mount *, struct thread *); 90 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, 91 ufs2_daddr_t); 92 static void ffs_ifree(struct ufsmount *ump, struct inode *ip); 93 static int ffs_sync_lazy(struct mount *mp); 94 static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size); 95 static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size); 96 97 static vfs_init_t ffs_init; 98 static vfs_uninit_t ffs_uninit; 99 static vfs_extattrctl_t 
ffs_extattrctl; 100 static vfs_cmount_t ffs_cmount; 101 static vfs_unmount_t ffs_unmount; 102 static vfs_mount_t ffs_mount; 103 static vfs_statfs_t ffs_statfs; 104 static vfs_fhtovp_t ffs_fhtovp; 105 static vfs_sync_t ffs_sync; 106 107 static struct vfsops ufs_vfsops = { 108 .vfs_extattrctl = ffs_extattrctl, 109 .vfs_fhtovp = ffs_fhtovp, 110 .vfs_init = ffs_init, 111 .vfs_mount = ffs_mount, 112 .vfs_cmount = ffs_cmount, 113 .vfs_quotactl = ufs_quotactl, 114 .vfs_root = vfs_cache_root, 115 .vfs_cachedroot = ufs_root, 116 .vfs_statfs = ffs_statfs, 117 .vfs_sync = ffs_sync, 118 .vfs_uninit = ffs_uninit, 119 .vfs_unmount = ffs_unmount, 120 .vfs_vget = ffs_vget, 121 .vfs_susp_clean = process_deferred_inactive, 122 }; 123 124 VFS_SET(ufs_vfsops, ufs, 0); 125 MODULE_VERSION(ufs, 1); 126 127 static b_strategy_t ffs_geom_strategy; 128 static b_write_t ffs_bufwrite; 129 130 static struct buf_ops ffs_ops = { 131 .bop_name = "FFS", 132 .bop_write = ffs_bufwrite, 133 .bop_strategy = ffs_geom_strategy, 134 .bop_sync = bufsync, 135 #ifdef NO_FFS_SNAPSHOT 136 .bop_bdflush = bufbdflush, 137 #else 138 .bop_bdflush = ffs_bdflush, 139 #endif 140 }; 141 142 /* 143 * Note that userquota and groupquota options are not currently used 144 * by UFS/FFS code and generally mount(8) does not pass those options 145 * from userland, but they can be passed by loader(8) via 146 * vfs.root.mountfrom.options. 147 */ 148 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr", 149 "noclusterw", "noexec", "export", "force", "from", "groupquota", 150 "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir", 151 "nosymfollow", "sync", "union", "userquota", "untrusted", NULL }; 152 153 static int ffs_enxio_enable = 1; 154 SYSCTL_DECL(_vfs_ffs); 155 SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN, 156 &ffs_enxio_enable, 0, 157 "enable mapping of other disk I/O errors to ENXIO"); 158 159 /* 160 * Return buffer with the contents of block "offset" from the beginning of 161 * directory "ip". If "res" is non-zero, fill it in with a pointer to the 162 * remaining space in the directory. 163 */ 164 static int 165 ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp) 166 { 167 struct inode *ip; 168 struct fs *fs; 169 struct buf *bp; 170 ufs_lbn_t lbn; 171 int bsize, error; 172 173 ip = VTOI(vp); 174 fs = ITOFS(ip); 175 lbn = lblkno(fs, offset); 176 bsize = blksize(fs, ip, lbn); 177 178 *bpp = NULL; 179 error = bread(vp, lbn, bsize, NOCRED, &bp); 180 if (error) { 181 return (error); 182 } 183 if (res) 184 *res = (char *)bp->b_data + blkoff(fs, offset); 185 *bpp = bp; 186 return (0); 187 } 188 189 /* 190 * Load up the contents of an inode and copy the appropriate pieces 191 * to the incore copy. 
192 */ 193 static int 194 ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino) 195 { 196 struct ufs1_dinode *dip1; 197 struct ufs2_dinode *dip2; 198 int error; 199 200 if (I_IS_UFS1(ip)) { 201 dip1 = ip->i_din1; 202 *dip1 = 203 *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); 204 ip->i_mode = dip1->di_mode; 205 ip->i_nlink = dip1->di_nlink; 206 ip->i_effnlink = dip1->di_nlink; 207 ip->i_size = dip1->di_size; 208 ip->i_flags = dip1->di_flags; 209 ip->i_gen = dip1->di_gen; 210 ip->i_uid = dip1->di_uid; 211 ip->i_gid = dip1->di_gid; 212 return (0); 213 } 214 dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); 215 if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 && 216 !ffs_fsfail_cleanup(ITOUMP(ip), error)) { 217 printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt, 218 (intmax_t)ino); 219 return (error); 220 } 221 *ip->i_din2 = *dip2; 222 dip2 = ip->i_din2; 223 ip->i_mode = dip2->di_mode; 224 ip->i_nlink = dip2->di_nlink; 225 ip->i_effnlink = dip2->di_nlink; 226 ip->i_size = dip2->di_size; 227 ip->i_flags = dip2->di_flags; 228 ip->i_gen = dip2->di_gen; 229 ip->i_uid = dip2->di_uid; 230 ip->i_gid = dip2->di_gid; 231 return (0); 232 } 233 234 /* 235 * Verify that a filesystem block number is a valid data block. 236 * This routine is only called on untrusted filesystems. 237 */ 238 static int 239 ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize) 240 { 241 struct fs *fs; 242 struct ufsmount *ump; 243 ufs2_daddr_t end_daddr; 244 int cg, havemtx; 245 246 KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0, 247 ("ffs_check_blkno called on a trusted file system")); 248 ump = VFSTOUFS(mp); 249 fs = ump->um_fs; 250 cg = dtog(fs, daddr); 251 end_daddr = daddr + numfrags(fs, blksize); 252 /* 253 * Verify that the block number is a valid data block. Also check 254 * that it does not point to an inode block or a superblock. Accept 255 * blocks that are unalloacted (0) or part of snapshot metadata 256 * (BLK_NOCOPY or BLK_SNAP). 257 * 258 * Thus, the block must be in a valid range for the filesystem and 259 * either in the space before a backup superblock (except the first 260 * cylinder group where that space is used by the bootstrap code) or 261 * after the inode blocks and before the end of the cylinder group. 262 */ 263 if ((uint64_t)daddr <= BLK_SNAP || 264 ((uint64_t)end_daddr <= fs->fs_size && 265 ((cg > 0 && end_daddr <= cgsblock(fs, cg)) || 266 (daddr >= cgdmin(fs, cg) && 267 end_daddr <= cgbase(fs, cg) + fs->fs_fpg)))) 268 return (0); 269 if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0) 270 UFS_LOCK(ump); 271 if (ppsratecheck(&ump->um_last_integritymsg, 272 &ump->um_secs_integritymsg, 1)) { 273 UFS_UNLOCK(ump); 274 uprintf("\n%s: inode %jd, out-of-range indirect block " 275 "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr); 276 if (havemtx) 277 UFS_LOCK(ump); 278 } else if (!havemtx) 279 UFS_UNLOCK(ump); 280 return (EINTEGRITY); 281 } 282 283 /* 284 * On first ENXIO error, initiate an asynchronous forcible unmount. 285 * Used to unmount filesystems whose underlying media has gone away. 286 * 287 * Return true if a cleanup is in progress. 
288 */ 289 int 290 ffs_fsfail_cleanup(struct ufsmount *ump, int error) 291 { 292 int retval; 293 294 UFS_LOCK(ump); 295 retval = ffs_fsfail_cleanup_locked(ump, error); 296 UFS_UNLOCK(ump); 297 return (retval); 298 } 299 300 int 301 ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error) 302 { 303 mtx_assert(UFS_MTX(ump), MA_OWNED); 304 if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) { 305 ump->um_flags |= UM_FSFAIL_CLEANUP; 306 /* 307 * Queue an async forced unmount. 308 */ 309 vfs_ref(ump->um_mountp); 310 dounmount(ump->um_mountp, 311 MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread); 312 printf("UFS: forcibly unmounting %s from %s\n", 313 ump->um_mountp->mnt_stat.f_mntfromname, 314 ump->um_mountp->mnt_stat.f_mntonname); 315 } 316 return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0); 317 } 318 319 /* 320 * Wrapper used during ENXIO cleanup to allocate empty buffers when 321 * the kernel is unable to read the real one. They are needed so that 322 * the soft updates code can use them to unwind its dependencies. 323 */ 324 int 325 ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno, 326 daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt, 327 struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *), 328 struct buf **bpp) 329 { 330 int error; 331 332 flags |= GB_CVTENXIO; 333 error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt, 334 cred, flags, ckhashfunc, bpp); 335 if (error != 0 && ffs_fsfail_cleanup(ump, error)) { 336 error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp); 337 KASSERT(error == 0, ("getblkx failed")); 338 vfs_bio_bzero_buf(*bpp, 0, size); 339 } 340 return (error); 341 } 342 343 static int 344 ffs_mount(struct mount *mp) 345 { 346 struct vnode *devvp, *odevvp; 347 struct thread *td; 348 struct ufsmount *ump = NULL; 349 struct fs *fs; 350 int error, flags; 351 int error1 __diagused; 352 uint64_t mntorflags, saved_mnt_flag; 353 accmode_t accmode; 354 struct nameidata ndp; 355 char *fspec; 356 bool mounted_softdep; 357 358 td = curthread; 359 if (vfs_filteropt(mp->mnt_optnew, ffs_opts)) 360 return (EINVAL); 361 if (uma_inode == NULL) { 362 uma_inode = uma_zcreate("FFS inode", 363 sizeof(struct inode), NULL, NULL, NULL, NULL, 364 UMA_ALIGN_PTR, 0); 365 uma_ufs1 = uma_zcreate("FFS1 dinode", 366 sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL, 367 UMA_ALIGN_PTR, 0); 368 uma_ufs2 = uma_zcreate("FFS2 dinode", 369 sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL, 370 UMA_ALIGN_PTR, 0); 371 VFS_SMR_ZONE_SET(uma_inode); 372 } 373 374 vfs_deleteopt(mp->mnt_optnew, "groupquota"); 375 vfs_deleteopt(mp->mnt_optnew, "userquota"); 376 377 fspec = vfs_getopts(mp->mnt_optnew, "from", &error); 378 if (error) 379 return (error); 380 381 mntorflags = 0; 382 if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0) 383 mntorflags |= MNT_UNTRUSTED; 384 385 if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0) 386 mntorflags |= MNT_ACLS; 387 388 if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) { 389 mntorflags |= MNT_SNAPSHOT; 390 /* 391 * Once we have set the MNT_SNAPSHOT flag, do not 392 * persist "snapshot" in the options list. 
393 */ 394 vfs_deleteopt(mp->mnt_optnew, "snapshot"); 395 vfs_deleteopt(mp->mnt_opt, "snapshot"); 396 } 397 398 if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) { 399 if (mntorflags & MNT_ACLS) { 400 vfs_mount_error(mp, 401 "\"acls\" and \"nfsv4acls\" options " 402 "are mutually exclusive"); 403 return (EINVAL); 404 } 405 mntorflags |= MNT_NFS4ACLS; 406 } 407 408 MNT_ILOCK(mp); 409 mp->mnt_kern_flag &= ~MNTK_FPLOOKUP; 410 mp->mnt_flag |= mntorflags; 411 MNT_IUNLOCK(mp); 412 /* 413 * If updating, check whether changing from read-only to 414 * read/write; if there is no device name, that's all we do. 415 */ 416 if (mp->mnt_flag & MNT_UPDATE) { 417 ump = VFSTOUFS(mp); 418 fs = ump->um_fs; 419 odevvp = ump->um_odevvp; 420 devvp = ump->um_devvp; 421 if (fs->fs_ronly == 0 && 422 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 423 /* 424 * Flush any dirty data and suspend filesystem. 425 */ 426 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 427 return (error); 428 error = vfs_write_suspend_umnt(mp); 429 if (error != 0) 430 return (error); 431 432 fs->fs_ronly = 1; 433 if (MOUNTEDSOFTDEP(mp)) { 434 MNT_ILOCK(mp); 435 mp->mnt_flag &= ~MNT_SOFTDEP; 436 MNT_IUNLOCK(mp); 437 mounted_softdep = true; 438 } else 439 mounted_softdep = false; 440 441 /* 442 * Check for and optionally get rid of files open 443 * for writing. 444 */ 445 flags = WRITECLOSE; 446 if (mp->mnt_flag & MNT_FORCE) 447 flags |= FORCECLOSE; 448 if (mounted_softdep) { 449 error = softdep_flushfiles(mp, flags, td); 450 } else { 451 error = ffs_flushfiles(mp, flags, td); 452 } 453 if (error) { 454 fs->fs_ronly = 0; 455 if (mounted_softdep) { 456 MNT_ILOCK(mp); 457 mp->mnt_flag |= MNT_SOFTDEP; 458 MNT_IUNLOCK(mp); 459 } 460 vfs_write_resume(mp, 0); 461 return (error); 462 } 463 464 if (fs->fs_pendingblocks != 0 || 465 fs->fs_pendinginodes != 0) { 466 printf("WARNING: %s Update error: blocks %jd " 467 "files %d\n", fs->fs_fsmnt, 468 (intmax_t)fs->fs_pendingblocks, 469 fs->fs_pendinginodes); 470 fs->fs_pendingblocks = 0; 471 fs->fs_pendinginodes = 0; 472 } 473 if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) 474 fs->fs_clean = 1; 475 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 476 fs->fs_ronly = 0; 477 fs->fs_clean = 0; 478 if (mounted_softdep) { 479 MNT_ILOCK(mp); 480 mp->mnt_flag |= MNT_SOFTDEP; 481 MNT_IUNLOCK(mp); 482 } 483 vfs_write_resume(mp, 0); 484 return (error); 485 } 486 if (mounted_softdep) 487 softdep_unmount(mp); 488 g_topology_lock(); 489 /* 490 * Drop our write and exclusive access. 491 */ 492 g_access(ump->um_cp, 0, -1, -1); 493 g_topology_unlock(); 494 MNT_ILOCK(mp); 495 mp->mnt_flag |= MNT_RDONLY; 496 MNT_IUNLOCK(mp); 497 /* 498 * Allow the writers to note that filesystem 499 * is ro now. 500 */ 501 vfs_write_resume(mp, 0); 502 } 503 if ((mp->mnt_flag & MNT_RELOAD) && 504 (error = ffs_reload(mp, 0)) != 0) 505 return (error); 506 if (fs->fs_ronly && 507 !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 508 /* 509 * If upgrade to read-write by non-root, then verify 510 * that user has necessary permissions on the device. 
511 */ 512 vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY); 513 error = VOP_ACCESS(odevvp, VREAD | VWRITE, 514 td->td_ucred, td); 515 if (error) 516 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 517 VOP_UNLOCK(odevvp); 518 if (error) { 519 return (error); 520 } 521 fs->fs_flags &= ~FS_UNCLEAN; 522 if (fs->fs_clean == 0) { 523 fs->fs_flags |= FS_UNCLEAN; 524 if ((mp->mnt_flag & MNT_FORCE) || 525 ((fs->fs_flags & 526 (FS_SUJ | FS_NEEDSFSCK)) == 0 && 527 (fs->fs_flags & FS_DOSOFTDEP))) { 528 printf("WARNING: %s was not properly " 529 "dismounted\n", fs->fs_fsmnt); 530 } else { 531 vfs_mount_error(mp, 532 "R/W mount of %s denied. %s.%s", 533 fs->fs_fsmnt, 534 "Filesystem is not clean - run fsck", 535 (fs->fs_flags & FS_SUJ) == 0 ? "" : 536 " Forced mount will invalidate" 537 " journal contents"); 538 return (EPERM); 539 } 540 } 541 g_topology_lock(); 542 /* 543 * Request exclusive write access. 544 */ 545 error = g_access(ump->um_cp, 0, 1, 1); 546 g_topology_unlock(); 547 if (error) 548 return (error); 549 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 550 return (error); 551 error = vfs_write_suspend_umnt(mp); 552 if (error != 0) 553 return (error); 554 fs->fs_ronly = 0; 555 MNT_ILOCK(mp); 556 saved_mnt_flag = MNT_RDONLY; 557 if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag & 558 MNT_ASYNC) != 0) 559 saved_mnt_flag |= MNT_ASYNC; 560 mp->mnt_flag &= ~saved_mnt_flag; 561 MNT_IUNLOCK(mp); 562 fs->fs_mtime = time_second; 563 /* check to see if we need to start softdep */ 564 if ((fs->fs_flags & FS_DOSOFTDEP) && 565 (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ 566 fs->fs_ronly = 1; 567 MNT_ILOCK(mp); 568 mp->mnt_flag |= saved_mnt_flag; 569 MNT_IUNLOCK(mp); 570 vfs_write_resume(mp, 0); 571 return (error); 572 } 573 fs->fs_clean = 0; 574 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 575 fs->fs_ronly = 1; 576 if ((fs->fs_flags & FS_DOSOFTDEP) != 0) 577 softdep_unmount(mp); 578 MNT_ILOCK(mp); 579 mp->mnt_flag |= saved_mnt_flag; 580 MNT_IUNLOCK(mp); 581 vfs_write_resume(mp, 0); 582 return (error); 583 } 584 if (fs->fs_snapinum[0] != 0) 585 ffs_snapshot_mount(mp); 586 vfs_write_resume(mp, 0); 587 } 588 /* 589 * Soft updates is incompatible with "async", 590 * so if we are doing softupdates stop the user 591 * from setting the async flag in an update. 592 * Softdep_mount() clears it in an initial mount 593 * or ro->rw remount. 594 */ 595 if (MOUNTEDSOFTDEP(mp)) { 596 /* XXX: Reset too late ? */ 597 MNT_ILOCK(mp); 598 mp->mnt_flag &= ~MNT_ASYNC; 599 MNT_IUNLOCK(mp); 600 } 601 /* 602 * Keep MNT_ACLS flag if it is stored in superblock. 603 */ 604 if ((fs->fs_flags & FS_ACLS) != 0) { 605 /* XXX: Set too late ? */ 606 MNT_ILOCK(mp); 607 mp->mnt_flag |= MNT_ACLS; 608 MNT_IUNLOCK(mp); 609 } 610 611 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 612 /* XXX: Set too late ? */ 613 MNT_ILOCK(mp); 614 mp->mnt_flag |= MNT_NFS4ACLS; 615 MNT_IUNLOCK(mp); 616 } 617 618 /* 619 * If this is a snapshot request, take the snapshot. 620 */ 621 if (mp->mnt_flag & MNT_SNAPSHOT) 622 return (ffs_snapshot(mp, fspec)); 623 624 /* 625 * Must not call namei() while owning busy ref. 626 */ 627 vfs_unbusy(mp); 628 } 629 630 /* 631 * Not an update, or updating the name: look up the name 632 * and verify that it refers to a sensible disk device. 633 */ 634 NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec); 635 error = namei(&ndp); 636 if ((mp->mnt_flag & MNT_UPDATE) != 0) { 637 /* 638 * Unmount does not start if MNT_UPDATE is set. Mount 639 * update busies mp before setting MNT_UPDATE. 
We 640 * must be able to retain our busy ref succesfully, 641 * without sleep. 642 */ 643 error1 = vfs_busy(mp, MBF_NOWAIT); 644 MPASS(error1 == 0); 645 } 646 if (error != 0) 647 return (error); 648 NDFREE(&ndp, NDF_ONLY_PNBUF); 649 devvp = ndp.ni_vp; 650 if (!vn_isdisk_error(devvp, &error)) { 651 vput(devvp); 652 return (error); 653 } 654 655 /* 656 * If mount by non-root, then verify that user has necessary 657 * permissions on the device. 658 */ 659 accmode = VREAD; 660 if ((mp->mnt_flag & MNT_RDONLY) == 0) 661 accmode |= VWRITE; 662 error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); 663 if (error) 664 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 665 if (error) { 666 vput(devvp); 667 return (error); 668 } 669 670 if (mp->mnt_flag & MNT_UPDATE) { 671 /* 672 * Update only 673 * 674 * If it's not the same vnode, or at least the same device 675 * then it's not correct. 676 */ 677 678 if (devvp->v_rdev != ump->um_devvp->v_rdev) 679 error = EINVAL; /* needs translation */ 680 vput(devvp); 681 if (error) 682 return (error); 683 } else { 684 /* 685 * New mount 686 * 687 * We need the name for the mount point (also used for 688 * "last mounted on") copied in. If an error occurs, 689 * the mount point is discarded by the upper level code. 690 * Note that vfs_mount_alloc() populates f_mntonname for us. 691 */ 692 if ((error = ffs_mountfs(devvp, mp, td)) != 0) { 693 vrele(devvp); 694 return (error); 695 } 696 } 697 698 MNT_ILOCK(mp); 699 /* 700 * This is racy versus lookup, see ufs_fplookup_vexec for details. 701 */ 702 if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0) 703 panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp); 704 if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0) 705 mp->mnt_kern_flag |= MNTK_FPLOOKUP; 706 MNT_IUNLOCK(mp); 707 708 vfs_mountedfrom(mp, fspec); 709 return (0); 710 } 711 712 /* 713 * Compatibility with old mount system call. 714 */ 715 716 static int 717 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) 718 { 719 struct ufs_args args; 720 int error; 721 722 if (data == NULL) 723 return (EINVAL); 724 error = copyin(data, &args, sizeof args); 725 if (error) 726 return (error); 727 728 ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); 729 ma = mount_arg(ma, "export", &args.export, sizeof(args.export)); 730 error = kernel_mount(ma, flags); 731 732 return (error); 733 } 734 735 /* 736 * Reload all incore data for a filesystem (used after running fsck on 737 * the root filesystem and finding things to fix). If the 'force' flag 738 * is 0, the filesystem must be mounted read-only. 739 * 740 * Things to do to update the mount: 741 * 1) invalidate all cached meta-data. 742 * 2) re-read superblock from disk. 743 * 3) re-read summary information from disk. 744 * 4) invalidate all inactive vnodes. 745 * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary 746 * writers, if requested. 747 * 6) invalidate all cached file data. 748 * 7) re-read inode data for all active vnodes. 749 */ 750 int 751 ffs_reload(struct mount *mp, int flags) 752 { 753 struct vnode *vp, *mvp, *devvp; 754 struct inode *ip; 755 void *space; 756 struct buf *bp; 757 struct fs *fs, *newfs; 758 struct ufsmount *ump; 759 ufs2_daddr_t sblockloc; 760 int i, blks, error; 761 u_long size; 762 int32_t *lp; 763 764 ump = VFSTOUFS(mp); 765 766 MNT_ILOCK(mp); 767 if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) { 768 MNT_IUNLOCK(mp); 769 return (EINVAL); 770 } 771 MNT_IUNLOCK(mp); 772 773 /* 774 * Step 1: invalidate all cached meta-data. 
775 */ 776 devvp = VFSTOUFS(mp)->um_devvp; 777 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 778 if (vinvalbuf(devvp, 0, 0, 0) != 0) 779 panic("ffs_reload: dirty1"); 780 VOP_UNLOCK(devvp); 781 782 /* 783 * Step 2: re-read superblock from disk. 784 */ 785 fs = VFSTOUFS(mp)->um_fs; 786 if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, 787 NOCRED, &bp)) != 0) 788 return (error); 789 newfs = (struct fs *)bp->b_data; 790 if ((newfs->fs_magic != FS_UFS1_MAGIC && 791 newfs->fs_magic != FS_UFS2_MAGIC) || 792 newfs->fs_bsize > MAXBSIZE || 793 newfs->fs_bsize < sizeof(struct fs)) { 794 brelse(bp); 795 return (EIO); /* XXX needs translation */ 796 } 797 /* 798 * Preserve the summary information, read-only status, and 799 * superblock location by copying these fields into our new 800 * superblock before using it to update the existing superblock. 801 */ 802 newfs->fs_si = fs->fs_si; 803 newfs->fs_ronly = fs->fs_ronly; 804 sblockloc = fs->fs_sblockloc; 805 bcopy(newfs, fs, (u_int)fs->fs_sbsize); 806 brelse(bp); 807 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 808 ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); 809 UFS_LOCK(ump); 810 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 811 printf("WARNING: %s: reload pending error: blocks %jd " 812 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 813 fs->fs_pendinginodes); 814 fs->fs_pendingblocks = 0; 815 fs->fs_pendinginodes = 0; 816 } 817 UFS_UNLOCK(ump); 818 819 /* 820 * Step 3: re-read summary information from disk. 821 */ 822 size = fs->fs_cssize; 823 blks = howmany(size, fs->fs_fsize); 824 if (fs->fs_contigsumsize > 0) 825 size += fs->fs_ncg * sizeof(int32_t); 826 size += fs->fs_ncg * sizeof(u_int8_t); 827 free(fs->fs_csp, M_UFSMNT); 828 space = malloc(size, M_UFSMNT, M_WAITOK); 829 fs->fs_csp = space; 830 for (i = 0; i < blks; i += fs->fs_frag) { 831 size = fs->fs_bsize; 832 if (i + fs->fs_frag > blks) 833 size = (blks - i) * fs->fs_fsize; 834 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 835 NOCRED, &bp); 836 if (error) 837 return (error); 838 bcopy(bp->b_data, space, (u_int)size); 839 space = (char *)space + size; 840 brelse(bp); 841 } 842 /* 843 * We no longer know anything about clusters per cylinder group. 844 */ 845 if (fs->fs_contigsumsize > 0) { 846 fs->fs_maxcluster = lp = space; 847 for (i = 0; i < fs->fs_ncg; i++) 848 *lp++ = fs->fs_contigsumsize; 849 space = lp; 850 } 851 size = fs->fs_ncg * sizeof(u_int8_t); 852 fs->fs_contigdirs = (u_int8_t *)space; 853 bzero(fs->fs_contigdirs, size); 854 if ((flags & FFSR_UNSUSPEND) != 0) { 855 MNT_ILOCK(mp); 856 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); 857 wakeup(&mp->mnt_flag); 858 MNT_IUNLOCK(mp); 859 } 860 861 loop: 862 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 863 /* 864 * Skip syncer vnode. 865 */ 866 if (vp->v_type == VNON) { 867 VI_UNLOCK(vp); 868 continue; 869 } 870 /* 871 * Step 4: invalidate all cached file data. 872 */ 873 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) { 874 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 875 goto loop; 876 } 877 if (vinvalbuf(vp, 0, 0, 0)) 878 panic("ffs_reload: dirty2"); 879 /* 880 * Step 5: re-read inode data for all active vnodes. 
881 */ 882 ip = VTOI(vp); 883 error = 884 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 885 (int)fs->fs_bsize, NOCRED, &bp); 886 if (error) { 887 vput(vp); 888 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 889 return (error); 890 } 891 if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) { 892 brelse(bp); 893 vput(vp); 894 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 895 return (error); 896 } 897 ip->i_effnlink = ip->i_nlink; 898 brelse(bp); 899 vput(vp); 900 } 901 return (0); 902 } 903 904 /* 905 * Common code for mount and mountroot 906 */ 907 static int 908 ffs_mountfs(odevvp, mp, td) 909 struct vnode *odevvp; 910 struct mount *mp; 911 struct thread *td; 912 { 913 struct ufsmount *ump; 914 struct fs *fs; 915 struct cdev *dev; 916 int error, i, len, ronly; 917 struct ucred *cred; 918 struct g_consumer *cp; 919 struct mount *nmp; 920 struct vnode *devvp; 921 int candelete, canspeedup; 922 off_t loc; 923 924 fs = NULL; 925 ump = NULL; 926 cred = td ? td->td_ucred : NOCRED; 927 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 928 929 devvp = mntfs_allocvp(mp, odevvp); 930 VOP_UNLOCK(odevvp); 931 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 932 KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); 933 dev = devvp->v_rdev; 934 KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data")); 935 if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, 936 (uintptr_t)mp) == 0) { 937 mntfs_freevp(devvp); 938 return (EBUSY); 939 } 940 g_topology_lock(); 941 error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1); 942 g_topology_unlock(); 943 if (error != 0) { 944 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 945 mntfs_freevp(devvp); 946 return (error); 947 } 948 dev_ref(dev); 949 devvp->v_bufobj.bo_ops = &ffs_ops; 950 BO_LOCK(&odevvp->v_bufobj); 951 odevvp->v_bufobj.bo_flag |= BO_NOBUFS; 952 BO_UNLOCK(&odevvp->v_bufobj); 953 VOP_UNLOCK(devvp); 954 if (dev->si_iosize_max != 0) 955 mp->mnt_iosize_max = dev->si_iosize_max; 956 if (mp->mnt_iosize_max > maxphys) 957 mp->mnt_iosize_max = maxphys; 958 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { 959 error = EINVAL; 960 vfs_mount_error(mp, 961 "Invalid sectorsize %d for superblock size %d", 962 cp->provider->sectorsize, SBLOCKSIZE); 963 goto out; 964 } 965 /* fetch the superblock and summary information */ 966 loc = STDSB; 967 if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0) 968 loc = STDSB_NOHASHFAIL; 969 if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0) 970 goto out; 971 fs->fs_flags &= ~FS_UNCLEAN; 972 if (fs->fs_clean == 0) { 973 fs->fs_flags |= FS_UNCLEAN; 974 if (ronly || (mp->mnt_flag & MNT_FORCE) || 975 ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && 976 (fs->fs_flags & FS_DOSOFTDEP))) { 977 printf("WARNING: %s was not properly dismounted\n", 978 fs->fs_fsmnt); 979 } else { 980 vfs_mount_error(mp, "R/W mount of %s denied. %s%s", 981 fs->fs_fsmnt, "Filesystem is not clean - run fsck.", 982 (fs->fs_flags & FS_SUJ) == 0 ? 
"" : 983 " Forced mount will invalidate journal contents"); 984 error = EPERM; 985 goto out; 986 } 987 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 988 (mp->mnt_flag & MNT_FORCE)) { 989 printf("WARNING: %s: lost blocks %jd files %d\n", 990 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 991 fs->fs_pendinginodes); 992 fs->fs_pendingblocks = 0; 993 fs->fs_pendinginodes = 0; 994 } 995 } 996 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 997 printf("WARNING: %s: mount pending error: blocks %jd " 998 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 999 fs->fs_pendinginodes); 1000 fs->fs_pendingblocks = 0; 1001 fs->fs_pendinginodes = 0; 1002 } 1003 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 1004 #ifdef UFS_GJOURNAL 1005 /* 1006 * Get journal provider name. 1007 */ 1008 len = 1024; 1009 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 1010 if (g_io_getattr("GJOURNAL::provider", cp, &len, 1011 mp->mnt_gjprovider) == 0) { 1012 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 1013 M_UFSMNT, M_WAITOK); 1014 MNT_ILOCK(mp); 1015 mp->mnt_flag |= MNT_GJOURNAL; 1016 MNT_IUNLOCK(mp); 1017 } else { 1018 printf("WARNING: %s: GJOURNAL flag on fs " 1019 "but no gjournal provider below\n", 1020 mp->mnt_stat.f_mntonname); 1021 free(mp->mnt_gjprovider, M_UFSMNT); 1022 mp->mnt_gjprovider = NULL; 1023 } 1024 #else 1025 printf("WARNING: %s: GJOURNAL flag on fs but no " 1026 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 1027 #endif 1028 } else { 1029 mp->mnt_gjprovider = NULL; 1030 } 1031 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 1032 ump->um_cp = cp; 1033 ump->um_bo = &devvp->v_bufobj; 1034 ump->um_fs = fs; 1035 if (fs->fs_magic == FS_UFS1_MAGIC) { 1036 ump->um_fstype = UFS1; 1037 ump->um_balloc = ffs_balloc_ufs1; 1038 } else { 1039 ump->um_fstype = UFS2; 1040 ump->um_balloc = ffs_balloc_ufs2; 1041 } 1042 ump->um_blkatoff = ffs_blkatoff; 1043 ump->um_truncate = ffs_truncate; 1044 ump->um_update = ffs_update; 1045 ump->um_valloc = ffs_valloc; 1046 ump->um_vfree = ffs_vfree; 1047 ump->um_ifree = ffs_ifree; 1048 ump->um_rdonly = ffs_rdonly; 1049 ump->um_snapgone = ffs_snapgone; 1050 if ((mp->mnt_flag & MNT_UNTRUSTED) != 0) 1051 ump->um_check_blkno = ffs_check_blkno; 1052 else 1053 ump->um_check_blkno = NULL; 1054 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 1055 sx_init(&ump->um_checkpath_lock, "uchpth"); 1056 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); 1057 fs->fs_ronly = ronly; 1058 fs->fs_active = NULL; 1059 mp->mnt_data = ump; 1060 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 1061 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 1062 nmp = NULL; 1063 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 1064 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 1065 if (nmp) 1066 vfs_rel(nmp); 1067 vfs_getnewfsid(mp); 1068 } 1069 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 1070 MNT_ILOCK(mp); 1071 mp->mnt_flag |= MNT_LOCAL; 1072 MNT_IUNLOCK(mp); 1073 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 1074 #ifdef MAC 1075 MNT_ILOCK(mp); 1076 mp->mnt_flag |= MNT_MULTILABEL; 1077 MNT_IUNLOCK(mp); 1078 #else 1079 printf("WARNING: %s: multilabel flag on fs but " 1080 "no MAC support\n", mp->mnt_stat.f_mntonname); 1081 #endif 1082 } 1083 if ((fs->fs_flags & FS_ACLS) != 0) { 1084 #ifdef UFS_ACL 1085 MNT_ILOCK(mp); 1086 1087 if (mp->mnt_flag & MNT_NFS4ACLS) 1088 printf("WARNING: %s: ACLs flag on fs conflicts with " 1089 "\"nfsv4acls\" mount option; option ignored\n", 1090 mp->mnt_stat.f_mntonname); 1091 mp->mnt_flag &= ~MNT_NFS4ACLS; 1092 mp->mnt_flag |= MNT_ACLS; 
1093 1094 MNT_IUNLOCK(mp); 1095 #else 1096 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 1097 mp->mnt_stat.f_mntonname); 1098 #endif 1099 } 1100 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 1101 #ifdef UFS_ACL 1102 MNT_ILOCK(mp); 1103 1104 if (mp->mnt_flag & MNT_ACLS) 1105 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 1106 "with \"acls\" mount option; option ignored\n", 1107 mp->mnt_stat.f_mntonname); 1108 mp->mnt_flag &= ~MNT_ACLS; 1109 mp->mnt_flag |= MNT_NFS4ACLS; 1110 1111 MNT_IUNLOCK(mp); 1112 #else 1113 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 1114 "ACLs support\n", mp->mnt_stat.f_mntonname); 1115 #endif 1116 } 1117 if ((fs->fs_flags & FS_TRIM) != 0) { 1118 len = sizeof(int); 1119 if (g_io_getattr("GEOM::candelete", cp, &len, 1120 &candelete) == 0) { 1121 if (candelete) 1122 ump->um_flags |= UM_CANDELETE; 1123 else 1124 printf("WARNING: %s: TRIM flag on fs but disk " 1125 "does not support TRIM\n", 1126 mp->mnt_stat.f_mntonname); 1127 } else { 1128 printf("WARNING: %s: TRIM flag on fs but disk does " 1129 "not confirm that it supports TRIM\n", 1130 mp->mnt_stat.f_mntonname); 1131 } 1132 if (((ump->um_flags) & UM_CANDELETE) != 0) { 1133 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 1134 taskqueue_thread_enqueue, &ump->um_trim_tq); 1135 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 1136 "%s trim", mp->mnt_stat.f_mntonname); 1137 ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM, 1138 &ump->um_trimlisthashsize); 1139 } 1140 } 1141 1142 len = sizeof(int); 1143 if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) { 1144 if (canspeedup) 1145 ump->um_flags |= UM_CANSPEEDUP; 1146 } 1147 1148 ump->um_mountp = mp; 1149 ump->um_dev = dev; 1150 ump->um_devvp = devvp; 1151 ump->um_odevvp = odevvp; 1152 ump->um_nindir = fs->fs_nindir; 1153 ump->um_bptrtodb = fs->fs_fsbtodb; 1154 ump->um_seqinc = fs->fs_frag; 1155 for (i = 0; i < MAXQUOTAS; i++) 1156 ump->um_quotas[i] = NULLVP; 1157 #ifdef UFS_EXTATTR 1158 ufs_extattr_uepm_init(&ump->um_extattr); 1159 #endif 1160 /* 1161 * Set FS local "last mounted on" information (NULL pad) 1162 */ 1163 bzero(fs->fs_fsmnt, MAXMNTLEN); 1164 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1165 mp->mnt_stat.f_iosize = fs->fs_bsize; 1166 1167 if (mp->mnt_flag & MNT_ROOTFS) { 1168 /* 1169 * Root mount; update timestamp in mount structure. 1170 * this will be used by the common root mount code 1171 * to update the system clock. 1172 */ 1173 mp->mnt_time = fs->fs_time; 1174 } 1175 1176 if (ronly == 0) { 1177 fs->fs_mtime = time_second; 1178 if ((fs->fs_flags & FS_DOSOFTDEP) && 1179 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1180 ffs_flushfiles(mp, FORCECLOSE, td); 1181 goto out; 1182 } 1183 if (fs->fs_snapinum[0] != 0) 1184 ffs_snapshot_mount(mp); 1185 fs->fs_fmod = 1; 1186 fs->fs_clean = 0; 1187 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1188 } 1189 /* 1190 * Initialize filesystem state information in mount struct. 1191 */ 1192 MNT_ILOCK(mp); 1193 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1194 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1195 MNT_IUNLOCK(mp); 1196 #ifdef UFS_EXTATTR 1197 #ifdef UFS_EXTATTR_AUTOSTART 1198 /* 1199 * 1200 * Auto-starting does the following: 1201 * - check for /.attribute in the fs, and extattr_start if so 1202 * - for each file in .attribute, enable that file with 1203 * an attribute of the same name. 1204 * Not clear how to report errors -- probably eat them. 
1205 * This would all happen while the filesystem was busy/not 1206 * available, so would effectively be "atomic". 1207 */ 1208 (void) ufs_extattr_autostart(mp, td); 1209 #endif /* !UFS_EXTATTR_AUTOSTART */ 1210 #endif /* !UFS_EXTATTR */ 1211 return (0); 1212 out: 1213 if (fs != NULL) { 1214 free(fs->fs_csp, M_UFSMNT); 1215 free(fs->fs_si, M_UFSMNT); 1216 free(fs, M_UFSMNT); 1217 } 1218 if (cp != NULL) { 1219 g_topology_lock(); 1220 g_vfs_close(cp); 1221 g_topology_unlock(); 1222 } 1223 if (ump != NULL) { 1224 mtx_destroy(UFS_MTX(ump)); 1225 sx_destroy(&ump->um_checkpath_lock); 1226 if (mp->mnt_gjprovider != NULL) { 1227 free(mp->mnt_gjprovider, M_UFSMNT); 1228 mp->mnt_gjprovider = NULL; 1229 } 1230 MPASS(ump->um_softdep == NULL); 1231 free(ump, M_UFSMNT); 1232 mp->mnt_data = NULL; 1233 } 1234 BO_LOCK(&odevvp->v_bufobj); 1235 odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1236 BO_UNLOCK(&odevvp->v_bufobj); 1237 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 1238 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1239 mntfs_freevp(devvp); 1240 dev_rel(dev); 1241 return (error); 1242 } 1243 1244 /* 1245 * A read function for use by filesystem-layer routines. 1246 */ 1247 static int 1248 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size) 1249 { 1250 struct buf *bp; 1251 int error; 1252 1253 KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp)); 1254 *bufp = malloc(size, M_UFSMNT, M_WAITOK); 1255 if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED, 1256 &bp)) != 0) 1257 return (error); 1258 bcopy(bp->b_data, *bufp, size); 1259 bp->b_flags |= B_INVAL | B_NOCACHE; 1260 brelse(bp); 1261 return (0); 1262 } 1263 1264 static int bigcgs = 0; 1265 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); 1266 1267 /* 1268 * Sanity checks for loading old filesystem superblocks. 1269 * See ffs_oldfscompat_write below for unwound actions. 1270 * 1271 * XXX - Parts get retired eventually. 1272 * Unfortunately new bits get added. 1273 */ 1274 static void 1275 ffs_oldfscompat_read(fs, ump, sblockloc) 1276 struct fs *fs; 1277 struct ufsmount *ump; 1278 ufs2_daddr_t sblockloc; 1279 { 1280 off_t maxfilesize; 1281 1282 /* 1283 * If not yet done, update fs_flags location and value of fs_sblockloc. 1284 */ 1285 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1286 fs->fs_flags = fs->fs_old_flags; 1287 fs->fs_old_flags |= FS_FLAGS_UPDATED; 1288 fs->fs_sblockloc = sblockloc; 1289 } 1290 /* 1291 * If not yet done, update UFS1 superblock with new wider fields. 
1292 */ 1293 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { 1294 fs->fs_maxbsize = fs->fs_bsize; 1295 fs->fs_time = fs->fs_old_time; 1296 fs->fs_size = fs->fs_old_size; 1297 fs->fs_dsize = fs->fs_old_dsize; 1298 fs->fs_csaddr = fs->fs_old_csaddr; 1299 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; 1300 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; 1301 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; 1302 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; 1303 } 1304 if (fs->fs_magic == FS_UFS1_MAGIC && 1305 fs->fs_old_inodefmt < FS_44INODEFMT) { 1306 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; 1307 fs->fs_qbmask = ~fs->fs_bmask; 1308 fs->fs_qfmask = ~fs->fs_fmask; 1309 } 1310 if (fs->fs_magic == FS_UFS1_MAGIC) { 1311 ump->um_savedmaxfilesize = fs->fs_maxfilesize; 1312 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; 1313 if (fs->fs_maxfilesize > maxfilesize) 1314 fs->fs_maxfilesize = maxfilesize; 1315 } 1316 /* Compatibility for old filesystems */ 1317 if (fs->fs_avgfilesize <= 0) 1318 fs->fs_avgfilesize = AVFILESIZ; 1319 if (fs->fs_avgfpdir <= 0) 1320 fs->fs_avgfpdir = AFPDIR; 1321 if (bigcgs) { 1322 fs->fs_save_cgsize = fs->fs_cgsize; 1323 fs->fs_cgsize = fs->fs_bsize; 1324 } 1325 } 1326 1327 /* 1328 * Unwinding superblock updates for old filesystems. 1329 * See ffs_oldfscompat_read above for details. 1330 * 1331 * XXX - Parts get retired eventually. 1332 * Unfortunately new bits get added. 1333 */ 1334 void 1335 ffs_oldfscompat_write(fs, ump) 1336 struct fs *fs; 1337 struct ufsmount *ump; 1338 { 1339 1340 /* 1341 * Copy back UFS2 updated fields that UFS1 inspects. 1342 */ 1343 if (fs->fs_magic == FS_UFS1_MAGIC) { 1344 fs->fs_old_time = fs->fs_time; 1345 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; 1346 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; 1347 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; 1348 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; 1349 fs->fs_maxfilesize = ump->um_savedmaxfilesize; 1350 } 1351 if (bigcgs) { 1352 fs->fs_cgsize = fs->fs_save_cgsize; 1353 fs->fs_save_cgsize = 0; 1354 } 1355 } 1356 1357 /* 1358 * unmount system call 1359 */ 1360 static int 1361 ffs_unmount(mp, mntflags) 1362 struct mount *mp; 1363 int mntflags; 1364 { 1365 struct thread *td; 1366 struct ufsmount *ump = VFSTOUFS(mp); 1367 struct fs *fs; 1368 int error, flags, susp; 1369 #ifdef UFS_EXTATTR 1370 int e_restart; 1371 #endif 1372 1373 flags = 0; 1374 td = curthread; 1375 fs = ump->um_fs; 1376 if (mntflags & MNT_FORCE) 1377 flags |= FORCECLOSE; 1378 susp = fs->fs_ronly == 0; 1379 #ifdef UFS_EXTATTR 1380 if ((error = ufs_extattr_stop(mp, td))) { 1381 if (error != EOPNOTSUPP) 1382 printf("WARNING: unmount %s: ufs_extattr_stop " 1383 "returned errno %d\n", mp->mnt_stat.f_mntonname, 1384 error); 1385 e_restart = 0; 1386 } else { 1387 ufs_extattr_uepm_destroy(&ump->um_extattr); 1388 e_restart = 1; 1389 } 1390 #endif 1391 if (susp) { 1392 error = vfs_write_suspend_umnt(mp); 1393 if (error != 0) 1394 goto fail1; 1395 } 1396 if (MOUNTEDSOFTDEP(mp)) 1397 error = softdep_flushfiles(mp, flags, td); 1398 else 1399 error = ffs_flushfiles(mp, flags, td); 1400 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) 1401 goto fail; 1402 1403 UFS_LOCK(ump); 1404 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1405 printf("WARNING: unmount %s: pending error: blocks %jd " 1406 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1407 fs->fs_pendinginodes); 1408 fs->fs_pendingblocks = 0; 1409 
fs->fs_pendinginodes = 0; 1410 } 1411 UFS_UNLOCK(ump); 1412 if (MOUNTEDSOFTDEP(mp)) 1413 softdep_unmount(mp); 1414 MPASS(ump->um_softdep == NULL); 1415 if (fs->fs_ronly == 0) { 1416 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; 1417 error = ffs_sbupdate(ump, MNT_WAIT, 0); 1418 if (ffs_fsfail_cleanup(ump, error)) 1419 error = 0; 1420 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) { 1421 fs->fs_clean = 0; 1422 goto fail; 1423 } 1424 } 1425 if (susp) 1426 vfs_write_resume(mp, VR_START_WRITE); 1427 if (ump->um_trim_tq != NULL) { 1428 while (ump->um_trim_inflight != 0) 1429 pause("ufsutr", hz); 1430 taskqueue_drain_all(ump->um_trim_tq); 1431 taskqueue_free(ump->um_trim_tq); 1432 free (ump->um_trimhash, M_TRIM); 1433 } 1434 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1435 g_topology_lock(); 1436 g_vfs_close(ump->um_cp); 1437 g_topology_unlock(); 1438 BO_LOCK(&ump->um_odevvp->v_bufobj); 1439 ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1440 BO_UNLOCK(&ump->um_odevvp->v_bufobj); 1441 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1442 mntfs_freevp(ump->um_devvp); 1443 vrele(ump->um_odevvp); 1444 dev_rel(ump->um_dev); 1445 mtx_destroy(UFS_MTX(ump)); 1446 sx_destroy(&ump->um_checkpath_lock); 1447 if (mp->mnt_gjprovider != NULL) { 1448 free(mp->mnt_gjprovider, M_UFSMNT); 1449 mp->mnt_gjprovider = NULL; 1450 } 1451 free(fs->fs_csp, M_UFSMNT); 1452 free(fs->fs_si, M_UFSMNT); 1453 free(fs, M_UFSMNT); 1454 free(ump, M_UFSMNT); 1455 mp->mnt_data = NULL; 1456 MNT_ILOCK(mp); 1457 mp->mnt_flag &= ~MNT_LOCAL; 1458 MNT_IUNLOCK(mp); 1459 if (td->td_su == mp) { 1460 td->td_su = NULL; 1461 vfs_rel(mp); 1462 } 1463 return (error); 1464 1465 fail: 1466 if (susp) 1467 vfs_write_resume(mp, VR_START_WRITE); 1468 fail1: 1469 #ifdef UFS_EXTATTR 1470 if (e_restart) { 1471 ufs_extattr_uepm_init(&ump->um_extattr); 1472 #ifdef UFS_EXTATTR_AUTOSTART 1473 (void) ufs_extattr_autostart(mp, td); 1474 #endif 1475 } 1476 #endif 1477 1478 return (error); 1479 } 1480 1481 /* 1482 * Flush out all the files in a filesystem. 1483 */ 1484 int 1485 ffs_flushfiles(mp, flags, td) 1486 struct mount *mp; 1487 int flags; 1488 struct thread *td; 1489 { 1490 struct ufsmount *ump; 1491 int qerror, error; 1492 1493 ump = VFSTOUFS(mp); 1494 qerror = 0; 1495 #ifdef QUOTA 1496 if (mp->mnt_flag & MNT_QUOTA) { 1497 int i; 1498 error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1499 if (error) 1500 return (error); 1501 for (i = 0; i < MAXQUOTAS; i++) { 1502 error = quotaoff(td, mp, i); 1503 if (error != 0) { 1504 if ((flags & EARLYFLUSH) == 0) 1505 return (error); 1506 else 1507 qerror = error; 1508 } 1509 } 1510 1511 /* 1512 * Here we fall through to vflush again to ensure that 1513 * we have gotten rid of all the system vnodes, unless 1514 * quotas must not be closed. 1515 */ 1516 } 1517 #endif 1518 /* devvp is not locked there */ 1519 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1520 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1521 return (error); 1522 ffs_snapshot_unmount(mp); 1523 flags |= FORCECLOSE; 1524 /* 1525 * Here we fall through to vflush again to ensure 1526 * that we have gotten rid of all the system vnodes. 1527 */ 1528 } 1529 1530 /* 1531 * Do not close system files if quotas were not closed, to be 1532 * able to sync the remaining dquots. The freeblks softupdate 1533 * workitems might hold a reference on a dquot, preventing 1534 * quotaoff() from completing. 
Next round of 1535 * softdep_flushworklist() iteration should process the 1536 * blockers, allowing the next run of quotaoff() to finally 1537 * flush held dquots. 1538 * 1539 * Otherwise, flush all the files. 1540 */ 1541 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1542 return (error); 1543 1544 /* 1545 * Flush filesystem metadata. 1546 */ 1547 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1548 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1549 VOP_UNLOCK(ump->um_devvp); 1550 return (error); 1551 } 1552 1553 /* 1554 * Get filesystem statistics. 1555 */ 1556 static int 1557 ffs_statfs(mp, sbp) 1558 struct mount *mp; 1559 struct statfs *sbp; 1560 { 1561 struct ufsmount *ump; 1562 struct fs *fs; 1563 1564 ump = VFSTOUFS(mp); 1565 fs = ump->um_fs; 1566 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1567 panic("ffs_statfs"); 1568 sbp->f_version = STATFS_VERSION; 1569 sbp->f_bsize = fs->fs_fsize; 1570 sbp->f_iosize = fs->fs_bsize; 1571 sbp->f_blocks = fs->fs_dsize; 1572 UFS_LOCK(ump); 1573 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 1574 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 1575 sbp->f_bavail = freespace(fs, fs->fs_minfree) + 1576 dbtofsb(fs, fs->fs_pendingblocks); 1577 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; 1578 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 1579 UFS_UNLOCK(ump); 1580 sbp->f_namemax = UFS_MAXNAMLEN; 1581 return (0); 1582 } 1583 1584 static bool 1585 sync_doupdate(struct inode *ip) 1586 { 1587 1588 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 1589 IN_UPDATE)) != 0); 1590 } 1591 1592 static int 1593 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused) 1594 { 1595 struct inode *ip; 1596 1597 /* 1598 * Flags are safe to access because ->v_data invalidation 1599 * is held off by listmtx. 1600 */ 1601 if (vp->v_type == VNON) 1602 return (false); 1603 ip = VTOI(vp); 1604 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) 1605 return (false); 1606 return (true); 1607 } 1608 1609 /* 1610 * For a lazy sync, we only care about access times, quotas and the 1611 * superblock. Other filesystem changes are already converted to 1612 * cylinder group blocks or inode blocks updates and are written to 1613 * disk by syncer. 1614 */ 1615 static int 1616 ffs_sync_lazy(mp) 1617 struct mount *mp; 1618 { 1619 struct vnode *mvp, *vp; 1620 struct inode *ip; 1621 int allerror, error; 1622 1623 allerror = 0; 1624 if ((mp->mnt_flag & MNT_NOATIME) != 0) { 1625 #ifdef QUOTA 1626 qsync(mp); 1627 #endif 1628 goto sbupdate; 1629 } 1630 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) { 1631 if (vp->v_type == VNON) { 1632 VI_UNLOCK(vp); 1633 continue; 1634 } 1635 ip = VTOI(vp); 1636 1637 /* 1638 * The IN_ACCESS flag is converted to IN_MODIFIED by 1639 * ufs_close() and ufs_getattr() by the calls to 1640 * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1641 * Test also all the other timestamp flags too, to pick up 1642 * any other cases that could be missed. 
1643 */ 1644 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1645 VI_UNLOCK(vp); 1646 continue; 1647 } 1648 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0) 1649 continue; 1650 #ifdef QUOTA 1651 qsyncvp(vp); 1652 #endif 1653 if (sync_doupdate(ip)) 1654 error = ffs_update(vp, 0); 1655 if (error != 0) 1656 allerror = error; 1657 vput(vp); 1658 } 1659 sbupdate: 1660 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1661 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1662 allerror = error; 1663 return (allerror); 1664 } 1665 1666 /* 1667 * Go through the disk queues to initiate sandbagged IO; 1668 * go through the inodes to write those that have been modified; 1669 * initiate the writing of the super block if it has been modified. 1670 * 1671 * Note: we are always called with the filesystem marked busy using 1672 * vfs_busy(). 1673 */ 1674 static int 1675 ffs_sync(mp, waitfor) 1676 struct mount *mp; 1677 int waitfor; 1678 { 1679 struct vnode *mvp, *vp, *devvp; 1680 struct thread *td; 1681 struct inode *ip; 1682 struct ufsmount *ump = VFSTOUFS(mp); 1683 struct fs *fs; 1684 int error, count, lockreq, allerror = 0; 1685 int suspend; 1686 int suspended; 1687 int secondary_writes; 1688 int secondary_accwrites; 1689 int softdep_deps; 1690 int softdep_accdeps; 1691 struct bufobj *bo; 1692 1693 suspend = 0; 1694 suspended = 0; 1695 td = curthread; 1696 fs = ump->um_fs; 1697 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) 1698 panic("%s: ffs_sync: modification on read-only filesystem", 1699 fs->fs_fsmnt); 1700 if (waitfor == MNT_LAZY) { 1701 if (!rebooting) 1702 return (ffs_sync_lazy(mp)); 1703 waitfor = MNT_NOWAIT; 1704 } 1705 1706 /* 1707 * Write back each (modified) inode. 1708 */ 1709 lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1710 if (waitfor == MNT_SUSPEND) { 1711 suspend = 1; 1712 waitfor = MNT_WAIT; 1713 } 1714 if (waitfor == MNT_WAIT) 1715 lockreq = LK_EXCLUSIVE; 1716 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; 1717 loop: 1718 /* Grab snapshot of secondary write counts */ 1719 MNT_ILOCK(mp); 1720 secondary_writes = mp->mnt_secondary_writes; 1721 secondary_accwrites = mp->mnt_secondary_accwrites; 1722 MNT_IUNLOCK(mp); 1723 1724 /* Grab snapshot of softdep dependency counts */ 1725 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1726 1727 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1728 /* 1729 * Depend on the vnode interlock to keep things stable enough 1730 * for a quick test. Since there might be hundreds of 1731 * thousands of vnodes, we cannot afford even a subroutine 1732 * call unless there's a good chance that we have work to do. 1733 */ 1734 if (vp->v_type == VNON) { 1735 VI_UNLOCK(vp); 1736 continue; 1737 } 1738 ip = VTOI(vp); 1739 if ((ip->i_flag & 1740 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 1741 vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1742 VI_UNLOCK(vp); 1743 continue; 1744 } 1745 if ((error = vget(vp, lockreq)) != 0) { 1746 if (error == ENOENT || error == ENOLCK) { 1747 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1748 goto loop; 1749 } 1750 continue; 1751 } 1752 #ifdef QUOTA 1753 qsyncvp(vp); 1754 #endif 1755 for (;;) { 1756 error = ffs_syncvnode(vp, waitfor, 0); 1757 if (error == ERELOOKUP) 1758 continue; 1759 if (error != 0) 1760 allerror = error; 1761 break; 1762 } 1763 vput(vp); 1764 } 1765 /* 1766 * Force stale filesystem control information to be flushed. 
1767 */ 1768 if (waitfor == MNT_WAIT || rebooting) { 1769 if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 1770 allerror = error; 1771 if (ffs_fsfail_cleanup(ump, allerror)) 1772 allerror = 0; 1773 /* Flushed work items may create new vnodes to clean */ 1774 if (allerror == 0 && count) 1775 goto loop; 1776 } 1777 1778 devvp = ump->um_devvp; 1779 bo = &devvp->v_bufobj; 1780 BO_LOCK(bo); 1781 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1782 BO_UNLOCK(bo); 1783 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1784 error = VOP_FSYNC(devvp, waitfor, td); 1785 VOP_UNLOCK(devvp); 1786 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 1787 error = ffs_sbupdate(ump, waitfor, 0); 1788 if (error != 0) 1789 allerror = error; 1790 if (ffs_fsfail_cleanup(ump, allerror)) 1791 allerror = 0; 1792 if (allerror == 0 && waitfor == MNT_WAIT) 1793 goto loop; 1794 } else if (suspend != 0) { 1795 if (softdep_check_suspend(mp, 1796 devvp, 1797 softdep_deps, 1798 softdep_accdeps, 1799 secondary_writes, 1800 secondary_accwrites) != 0) { 1801 MNT_IUNLOCK(mp); 1802 goto loop; /* More work needed */ 1803 } 1804 mtx_assert(MNT_MTX(mp), MA_OWNED); 1805 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1806 MNT_IUNLOCK(mp); 1807 suspended = 1; 1808 } else 1809 BO_UNLOCK(bo); 1810 /* 1811 * Write back modified superblock. 1812 */ 1813 if (fs->fs_fmod != 0 && 1814 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1815 allerror = error; 1816 if (ffs_fsfail_cleanup(ump, allerror)) 1817 allerror = 0; 1818 return (allerror); 1819 } 1820 1821 int 1822 ffs_vget(mp, ino, flags, vpp) 1823 struct mount *mp; 1824 ino_t ino; 1825 int flags; 1826 struct vnode **vpp; 1827 { 1828 return (ffs_vgetf(mp, ino, flags, vpp, 0)); 1829 } 1830 1831 int 1832 ffs_vgetf(mp, ino, flags, vpp, ffs_flags) 1833 struct mount *mp; 1834 ino_t ino; 1835 int flags; 1836 struct vnode **vpp; 1837 int ffs_flags; 1838 { 1839 struct fs *fs; 1840 struct inode *ip; 1841 struct ufsmount *ump; 1842 struct buf *bp; 1843 struct vnode *vp; 1844 daddr_t dbn; 1845 int error; 1846 1847 MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 || 1848 (flags & LK_EXCLUSIVE) != 0); 1849 1850 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 1851 if (error != 0) 1852 return (error); 1853 if (*vpp != NULL) { 1854 if ((ffs_flags & FFSV_REPLACE) == 0 || 1855 ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 || 1856 !VN_IS_DOOMED(*vpp))) 1857 return (0); 1858 vgone(*vpp); 1859 vput(*vpp); 1860 } 1861 1862 /* 1863 * We must promote to an exclusive lock for vnode creation. This 1864 * can happen if lookup is passed LOCKSHARED. 1865 */ 1866 if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1867 flags &= ~LK_TYPE_MASK; 1868 flags |= LK_EXCLUSIVE; 1869 } 1870 1871 /* 1872 * We do not lock vnode creation as it is believed to be too 1873 * expensive for such rare case as simultaneous creation of vnode 1874 * for same ino by different processes. We just allow them to race 1875 * and check later to decide who wins. Let the race begin! 1876 */ 1877 1878 ump = VFSTOUFS(mp); 1879 fs = ump->um_fs; 1880 ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO); 1881 1882 /* Allocate a new vnode/inode. */ 1883 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 1884 &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1885 if (error) { 1886 *vpp = NULL; 1887 uma_zfree_smr(uma_inode, ip); 1888 return (error); 1889 } 1890 /* 1891 * FFS supports recursive locking. 
1892 */ 1893 lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); 1894 VN_LOCK_AREC(vp); 1895 vp->v_data = ip; 1896 vp->v_bufobj.bo_bsize = fs->fs_bsize; 1897 ip->i_vnode = vp; 1898 ip->i_ump = ump; 1899 ip->i_number = ino; 1900 ip->i_ea_refs = 0; 1901 ip->i_nextclustercg = -1; 1902 ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; 1903 ip->i_mode = 0; /* ensure error cases below throw away vnode */ 1904 cluster_init_vn(&ip->i_clusterw); 1905 #ifdef DIAGNOSTIC 1906 ufs_init_trackers(ip); 1907 #endif 1908 #ifdef QUOTA 1909 { 1910 int i; 1911 for (i = 0; i < MAXQUOTAS; i++) 1912 ip->i_dquot[i] = NODQUOT; 1913 } 1914 #endif 1915 1916 if (ffs_flags & FFSV_FORCEINSMQ) 1917 vp->v_vflag |= VV_FORCEINSMQ; 1918 error = insmntque(vp, mp); 1919 if (error != 0) { 1920 uma_zfree_smr(uma_inode, ip); 1921 *vpp = NULL; 1922 return (error); 1923 } 1924 vp->v_vflag &= ~VV_FORCEINSMQ; 1925 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 1926 if (error != 0) 1927 return (error); 1928 if (*vpp != NULL) { 1929 /* 1930 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set) 1931 * operate on empty inode, which must not be found by 1932 * other threads until fully filled. Vnode for empty 1933 * inode must be not re-inserted on the hash by other 1934 * thread, after removal by us at the beginning. 1935 */ 1936 MPASS((ffs_flags & FFSV_REPLACE) == 0); 1937 return (0); 1938 } 1939 1940 /* Read in the disk contents for the inode, copy into the inode. */ 1941 dbn = fsbtodb(fs, ino_to_fsba(fs, ino)); 1942 error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize, 1943 NULL, NULL, 0, NOCRED, 0, NULL, &bp); 1944 if (error != 0) { 1945 /* 1946 * The inode does not contain anything useful, so it would 1947 * be misleading to leave it on its hash chain. With mode 1948 * still zero, it will be unlinked and returned to the free 1949 * list by vput(). 1950 */ 1951 vgone(vp); 1952 vput(vp); 1953 *vpp = NULL; 1954 return (error); 1955 } 1956 if (I_IS_UFS1(ip)) 1957 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 1958 else 1959 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1960 if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) { 1961 bqrelse(bp); 1962 vgone(vp); 1963 vput(vp); 1964 *vpp = NULL; 1965 return (error); 1966 } 1967 if (DOINGSOFTDEP(vp) && (!fs->fs_ronly || 1968 (ffs_flags & FFSV_FORCEINODEDEP) != 0)) 1969 softdep_load_inodeblock(ip); 1970 else 1971 ip->i_effnlink = ip->i_nlink; 1972 bqrelse(bp); 1973 1974 /* 1975 * Initialize the vnode from the inode, check for aliases. 1976 * Note that the underlying vnode may have changed. 1977 */ 1978 error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, 1979 &vp); 1980 if (error) { 1981 vgone(vp); 1982 vput(vp); 1983 *vpp = NULL; 1984 return (error); 1985 } 1986 1987 /* 1988 * Finish inode initialization. 1989 */ 1990 if (vp->v_type != VFIFO) { 1991 /* FFS supports shared locking for all files except fifos. */ 1992 VN_LOCK_ASHARE(vp); 1993 } 1994 1995 /* 1996 * Set up a generation number for this inode if it does not 1997 * already have one. This should only happen on old filesystems. 
/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{
	struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
	    vpp, 0));
}

int
ffs_inotovp(mp, ino, gen, lflags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	u_int64_t gen;
	int lflags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct ufsmount *ump;
	struct vnode *nvp;
	struct inode *ip;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	u_int cg;
	int error;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	*vpp = NULL;

	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);

	/*
	 * Need to check whether the inode is initialized because UFS2 does
	 * lazy initialization and nfs_fhtovp can offer arbitrary inode
	 * numbers.
	 */
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		cg = ino_to_cg(fs, ino);
		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
		if (error != 0)
			return (error);
		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
			brelse(bp);
			return (ESTALE);
		}
		brelse(bp);
	}

	error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
	if (error != 0)
		return (error);

	ip = VTOI(nvp);
	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
		if (ip->i_mode == 0)
			vgone(nvp);
		vput(nvp);
		return (ESTALE);
	}

	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
	*vpp = nvp;
	return (0);
}
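
/*
 * Illustrative sketch, not part of the original source: the opaque fid
 * handed back to NFS clients is, for UFS, a struct ufid whose ufid_ino and
 * ufid_gen fields are consumed above (see <ufs/ufs/ufsmount.h> for the
 * authoritative layout).  Because UFS assigns a fresh generation number
 * when an on-disk inode is reallocated, a client that kept a handle across
 * a remove-and-recreate of a file fails the ip->i_gen != gen comparison in
 * ffs_inotovp() and receives ESTALE instead of silently getting the new
 * file, e.g.:
 *
 *	struct ufid ufh = { .ufid_ino = ino, .ufid_gen = old_gen };
 *	error = VFS_FHTOVP(mp, (struct fid *)&ufh, LK_SHARED, &vp);
 *
 * returns ESTALE once the inode has been reused with a new generation.
 * Hand-building a handle like this is purely for illustration; real
 * handles come from the VOP_VPTOFH() path.
 */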
/*
 * Initialize the filesystem.
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(vfsp)
	struct vfsconf *vfsp;
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	taskqueue_drain_all(taskqueue_thread);
	return (ret);
}

/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount *ump;
	struct buf *sbbp;
	int waitfor;
	int suspended;
	int error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(ump, waitfor, suspended)
	struct ufsmount *ump;
	int waitfor;
	int suspended;
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE))
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}
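
/*
 * Illustrative note, not part of the original source: ffs_sbput() is the
 * shared superblock writer from the UFS library code.  It takes an opaque
 * "device" cookie plus a write callback, and invokes that callback once
 * for the superblock and once per block of summary information.  The
 * callback shape expected, and matched by ffs_use_bwrite() below, is
 * roughly:
 *
 *	static int my_sbwrite(void *devfd, off_t loc, void *buf, int size);
 *
 * where my_sbwrite is a hypothetical name used only for illustration and
 * the function writes "size" bytes from "buf" at byte offset "loc".
 * Passing the struct devfd as the cookie is how ffs_sbupdate() threads the
 * waitfor, suspended, and error state into the callback without globals.
 */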
/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself. We need to do special checks for it.
	 */
	bp = devfdp->sbbp;
	if (ffs_fsfail_cleanup(ump, devfdp->error))
		devfdp->error = 0;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	fs = (struct fs *)bp->b_data;
	ffs_oldfscompat_write(fs, ump);
	fs->fs_si = NULL;
	/* Recalculate the superblock hash */
	fs->fs_ckhash = ffs_calc_sbhash(fs);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree_smr(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");
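
/*
 * Illustrative note, not part of the original source: the knob above is an
 * ordinary read-write sysctl, so background writes of cylinder group
 * buffers can be disabled at run time for debugging, e.g.:
 *
 *	# sysctl debug.dobkgrdwrite=0
 *
 * With the knob cleared, ffs_bufwrite() below skips the copy-and-clone
 * path and falls through to a plain bufwrite() of the original buffer.
 */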
/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
		softdep_handle_error(bp);
#endif

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * If the write failed, mark the original cylinder group buffer
	 * origbp so that the failed write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed. Clear B_IOSTARTED in case of error.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirty bp on
	 * errors. That would cause a b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}

/*
 * Write, release buffer on completion.  (Done by iodone if async.)
 * Do not bother writing anything if the buffer is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now, so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous; we put it here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory. If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies. If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original. The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes. We have to recalculate
		 * its check hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}
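
/*
 * Illustrative note, not part of the original source: cylinder group
 * buffers carry a CRC32C check hash in cg_ckhash that must be computed
 * with the field itself zeroed, which is why both ffs_bufwrite() above and
 * ffs_geom_strategy() below use the same two-step pattern:
 *
 *	cgp->cg_ckhash = 0;
 *	cgp->cg_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
 *
 * When check hashes are enabled for the filesystem, readers recompute the
 * hash the same way and compare it against the stored value to detect
 * corrupted cylinder group blocks.
 */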
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	/*
	 * This is the bufobj strategy for the private VCHR vnodes
	 * used by FFS to access the underlying storage device.
	 * We override the default bufobj strategy and thus bypass
	 * VOP_STRATEGY() for these vnodes.
	 */
	vp = bo2vnode(bo);
	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
	    bp->b_vp->v_rdev == NULL ||
	    bp->b_vp->v_rdev->si_mountpt == NULL ||
	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
	    ("ffs_geom_strategy() with wrong vp"));
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bp->b_flags &= ~B_BARRIER;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags &= ~B_BARRIER;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}

#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (u_int)(bp->b_xflags & BX_FSPRIV),
			    PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
		bp->b_xflags |= BX_CVTENXIO;
	g_vfs_strategy(bo, bp);
}

int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif /* SOFTUPDATES */
#endif /* DDB */
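
/*
 * Illustrative note, not part of the original source: with both DDB and
 * SOFTUPDATES compiled in, the command registered above can be invoked
 * from the kernel debugger either for a single mount or for every mounted
 * UFS filesystem:
 *
 *	db> show ffs 0xfffff80012345678	(address of a struct mount)
 *	db> show ffs			(walks the global mount list)
 *
 * The example address is made up; use an actual struct mount pointer.
 */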