/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };
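
/*
 * Informational example (an addition, not original commentary): the
 * loader-supplied options mentioned above would appear in
 * loader.conf(5) as, e.g.:
 *
 *	vfs.root.mountfrom.options="rw,userquota"
 */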
static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}
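
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * UFS-layer code reaches this routine through the mount's um_blkatoff
 * hook.  A caller wanting the directory block holding byte offset
 * "off" of directory vnode "dvp" might do:
 *
 *	struct buf *bp;
 *	char *cp;
 *
 *	if (ffs_blkatoff(dvp, off, &cp, &bp) == 0) {
 *		(examine directory entries starting at "cp")
 *		brelse(bp);
 *	}
 */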
/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block.  Also check
	 * that it does not point to an inode block or a superblock.  Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}

/*
 * Initiate a forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 */
static void
ffs_fsfail_unmount(void *v, int pending)
{
	struct fsfail_task *etp;
	struct mount *mp;

	etp = v;

	/*
	 * Find our mount and get a ref on it, then try to unmount.
	 */
	mp = vfs_getvfs(&etp->fsid);
	if (mp != NULL)
		dounmount(mp, MNT_FORCE, curthread);
	free(etp, M_UFSMNT);
}
/*
 * On first ENXIO error, start a task that forcibly unmounts the filesystem.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	struct fsfail_task *etp;
	struct task *tp;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		/*
		 * Queue an async forced unmount.
		 */
		etp = ump->um_fsfail_task;
		ump->um_fsfail_task = NULL;
		if (etp != NULL) {
			tp = &etp->task;
			TASK_INIT(tp, 0, ffs_fsfail_unmount, etp);
			taskqueue_enqueue(taskqueue_thread, tp);
			printf("UFS: forcibly unmounting %s from %s\n",
			    ump->um_mountp->mnt_stat.f_mntfromname,
			    ump->um_mountp->mnt_stat.f_mntonname);
		}
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one.  They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}
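
/*
 * Sketch of the resulting caller-visible behavior (an illustration,
 * not code from this file): once the media has failed and the cleanup
 * above is under way, a metadata read such as
 *
 *	error = ffs_breadz(ump, devvp, dbn, dbn, (int)fs->fs_bsize,
 *	    NULL, NULL, 0, NOCRED, 0, NULL, &bp);
 *
 * returns 0 with a zero-filled buffer instead of ENXIO, letting soft
 * updates unwind its dependency work items while the forced unmount
 * queued above proceeds.
 */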
static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	pid_t fsckpid = 0;
	int error, error1, flags;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}
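
	/*
	 * Informational note (an addition, not original commentary): the
	 * snapshot path above is normally exercised by mksnap_ffs(8),
	 * which issues an update mount with the "snapshot" option, e.g.:
	 *
	 *	mksnap_ffs /var/.snap/daily.0
	 */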
	if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
	    vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
		/*
		 * Once we have set the restricted PID, do not
		 * persist "fsckpid" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "fsckpid");
		vfs_deleteopt(mp->mnt_opt, "fsckpid");
		if (mp->mnt_flag & MNT_UPDATE) {
			if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
			    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
				vfs_mount_error(mp,
				    "Checker enable: Must be read-only");
				return (EINVAL);
			}
		} else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
			vfs_mount_error(mp,
			    "Checker enable: Must be read-only");
			return (EINVAL);
		}
		/* Set to -1 if we are done */
		if (fsckpid == 0)
			fsckpid = -1;
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;
		if (fsckpid == -1 && ump->um_fsckpid > 0) {
			if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
			    (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
				return (error);
			g_topology_lock();
			/*
			 * Return to normal read-only mode.
			 */
			error = g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			ump->um_fsckpid = 0;
		}
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
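		/*
		 * Illustrative note (an addition): the downgrade just
		 * above is what services an administrator's
		 *
		 *	mount -u -o ro /var
		 *
		 * request on a mounted read-write filesystem.
		 */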
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, td, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If we are running a checker, do not allow upgrade.
			 */
			if (ump->um_fsckpid > 0) {
				vfs_mount_error(mp,
				    "Active checker, cannot upgrade to write");
				return (EINVAL);
			}
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				    (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				    (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    fs->fs_fsmnt,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
		/*
		 * If this is a request from fsck to clean up the filesystem,
		 * then allow the specified pid to proceed.
		 */
		if (fsckpid > 0) {
			if (ump->um_fsckpid != 0) {
				vfs_mount_error(mp,
				    "Active checker already running on %s",
				    fs->fs_fsmnt);
				return (EINVAL);
			}
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				vfs_mount_error(mp,
				    "Checker activation failed on %s",
				    fs->fs_fsmnt);
				return (error);
			}
			ump->um_fsckpid = fsckpid;
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			fs->fs_mtime = time_second;
			fs->fs_fmod = 1;
			fs->fs_clean = 0;
			(void) ffs_sbupdate(ump, MNT_WAIT, 0);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));

		/*
		 * Must not call namei() while owning busy ref.
		 */
		vfs_unbusy(mp);
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk_error(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in.  If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount_alloc() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
		if (fsckpid > 0) {
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				printf("WARNING: %s: Checker activation "
				    "failed\n", fs->fs_fsmnt);
			} else {
				ump->um_fsckpid = fsckpid;
				if (fs->fs_snapinum[0] != 0)
					ffs_snapshot_mount(mp);
				fs->fs_mtime = time_second;
				fs->fs_clean = 0;
				(void) ffs_sbupdate(ump, MNT_WAIT, 0);
			}
		}
	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix).  If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, struct thread *td, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	    newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}
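
/*
 * Usage note (an addition): this is the path behind an update mount
 * with the "reload" option, typically issued after fsck(8) has
 * repaired the root filesystem:
 *
 *	mount -u -o reload /
 */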
/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(odevvp, mp, td)
	struct vnode *odevvp;
	struct mount *mp;
	struct thread *td;
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	struct fsfail_task *etp;
	int candelete, canspeedup;
	off_t loc;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	VOP_UNLOCK(odevvp);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	loc = STDSB;
	if ((mp->mnt_flag & MNT_ROOTFS) != 0)
		loc = STDSB_NOHASHFAIL;
	if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		    (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
"" : 1110 " Forced mount will invalidate journal contents"); 1111 error = EPERM; 1112 goto out; 1113 } 1114 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 1115 (mp->mnt_flag & MNT_FORCE)) { 1116 printf("WARNING: %s: lost blocks %jd files %d\n", 1117 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1118 fs->fs_pendinginodes); 1119 fs->fs_pendingblocks = 0; 1120 fs->fs_pendinginodes = 0; 1121 } 1122 } 1123 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1124 printf("WARNING: %s: mount pending error: blocks %jd " 1125 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1126 fs->fs_pendinginodes); 1127 fs->fs_pendingblocks = 0; 1128 fs->fs_pendinginodes = 0; 1129 } 1130 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 1131 #ifdef UFS_GJOURNAL 1132 /* 1133 * Get journal provider name. 1134 */ 1135 len = 1024; 1136 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 1137 if (g_io_getattr("GJOURNAL::provider", cp, &len, 1138 mp->mnt_gjprovider) == 0) { 1139 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 1140 M_UFSMNT, M_WAITOK); 1141 MNT_ILOCK(mp); 1142 mp->mnt_flag |= MNT_GJOURNAL; 1143 MNT_IUNLOCK(mp); 1144 } else { 1145 printf("WARNING: %s: GJOURNAL flag on fs " 1146 "but no gjournal provider below\n", 1147 mp->mnt_stat.f_mntonname); 1148 free(mp->mnt_gjprovider, M_UFSMNT); 1149 mp->mnt_gjprovider = NULL; 1150 } 1151 #else 1152 printf("WARNING: %s: GJOURNAL flag on fs but no " 1153 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 1154 #endif 1155 } else { 1156 mp->mnt_gjprovider = NULL; 1157 } 1158 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 1159 ump->um_cp = cp; 1160 ump->um_bo = &devvp->v_bufobj; 1161 ump->um_fs = fs; 1162 if (fs->fs_magic == FS_UFS1_MAGIC) { 1163 ump->um_fstype = UFS1; 1164 ump->um_balloc = ffs_balloc_ufs1; 1165 } else { 1166 ump->um_fstype = UFS2; 1167 ump->um_balloc = ffs_balloc_ufs2; 1168 } 1169 ump->um_blkatoff = ffs_blkatoff; 1170 ump->um_truncate = ffs_truncate; 1171 ump->um_update = ffs_update; 1172 ump->um_valloc = ffs_valloc; 1173 ump->um_vfree = ffs_vfree; 1174 ump->um_ifree = ffs_ifree; 1175 ump->um_rdonly = ffs_rdonly; 1176 ump->um_snapgone = ffs_snapgone; 1177 if ((mp->mnt_flag & MNT_UNTRUSTED) != 0) 1178 ump->um_check_blkno = ffs_check_blkno; 1179 else 1180 ump->um_check_blkno = NULL; 1181 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 1182 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); 1183 fs->fs_ronly = ronly; 1184 fs->fs_active = NULL; 1185 mp->mnt_data = ump; 1186 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 1187 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 1188 nmp = NULL; 1189 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 1190 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 1191 if (nmp) 1192 vfs_rel(nmp); 1193 vfs_getnewfsid(mp); 1194 } 1195 ump->um_maxsymlinklen = fs->fs_maxsymlinklen; 1196 MNT_ILOCK(mp); 1197 mp->mnt_flag |= MNT_LOCAL; 1198 MNT_IUNLOCK(mp); 1199 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 1200 #ifdef MAC 1201 MNT_ILOCK(mp); 1202 mp->mnt_flag |= MNT_MULTILABEL; 1203 MNT_IUNLOCK(mp); 1204 #else 1205 printf("WARNING: %s: multilabel flag on fs but " 1206 "no MAC support\n", mp->mnt_stat.f_mntonname); 1207 #endif 1208 } 1209 if ((fs->fs_flags & FS_ACLS) != 0) { 1210 #ifdef UFS_ACL 1211 MNT_ILOCK(mp); 1212 1213 if (mp->mnt_flag & MNT_NFS4ACLS) 1214 printf("WARNING: %s: ACLs flag on fs conflicts with " 1215 "\"nfsv4acls\" mount option; option ignored\n", 1216 mp->mnt_stat.f_mntonname); 1217 mp->mnt_flag &= ~MNT_NFS4ACLS; 1218 mp->mnt_flag |= MNT_ACLS; 1219 1220 MNT_IUNLOCK(mp); 1221 
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
		if (((ump->um_flags) & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_odevvp = odevvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 *
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 *	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	etp = malloc(sizeof *ump->um_fsfail_task, M_UFSMNT, M_WAITOK | M_ZERO);
	etp->fsid = mp->mnt_stat.f_fsid;
	ump->um_fsfail_task = etp;
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs->fs_si, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump) {
		mtx_destroy(UFS_MTX(ump));
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		MPASS(ump->um_softdep == NULL);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	mntfs_freevp(devvp);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(fs, ump, sblockloc)
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}
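
/*
 * Worked example for the UFS1 clamp above (an added note): with a 16K
 * block size, maxfilesize = 0x80000000 * 16384 - 1 = 2^45 - 1, so an
 * imported UFS1 superblock claiming a larger fs_maxfilesize is clamped
 * to just under 32TB; the original value is saved in
 * um_savedmaxfilesize and restored by ffs_oldfscompat_write() below.
 */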
/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(fs, ump)
	struct fs *fs;
	struct ufsmount *ump;
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(mp, mntflags)
	struct mount *mp;
	int mntflags;
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	susp = fs->fs_ronly == 0;
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	MPASS(ump->um_softdep == NULL);
	if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
		taskqueue_free(ump->um_trim_tq);
		free(ump->um_trimhash, M_TRIM);
	}
	g_topology_lock();
	if (ump->um_fsckpid > 0) {
		/*
		 * Return to normal read-only mode.
		 */
		error = g_access(ump->um_cp, 0, -1, 0);
		ump->um_fsckpid = 0;
	}
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	BO_LOCK(&ump->um_odevvp->v_bufobj);
	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	mntfs_freevp(ump->um_devvp);
	vrele(ump->um_odevvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	if (ump->um_fsfail_task != NULL)
		free(ump->um_fsfail_task, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}
/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}
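
/*
 * Informational example (an added note): for a filesystem created with
 * the newfs(8) defaults of 32K blocks and 4K fragments, the fields set
 * above report f_bsize = 4096 (the fragment, the allocation unit that
 * df(1) counts in) and f_iosize = 32768 (the preferred I/O size).
 */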
static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

static int
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
{
	struct inode *ip;

	/*
	 * Flags are safe to access because ->v_data invalidation
	 * is held off by listmtx.
	 */
	if (vp->v_type == VNON)
		return (false);
	ip = VTOI(vp);
	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
		return (false);
	return (true);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(mp)
	struct mount *mp;
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	struct thread *td;
	int allerror, error;

	allerror = 0;
	td = curthread;
	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
		qsync(mp);
#endif
		goto sbupdate;
	}
	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT |
		    LK_INTERLOCK)) != 0)
			continue;
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
sbupdate:
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
#ifdef QUOTA
		qsyncvp(vp);
#endif
		for (;;) {
			error = ffs_syncvnode(vp, waitfor, 0);
			if (error == ERELOOKUP)
				continue;
			if (error != 0)
				allerror = error;
			break;
		}
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
		    devvp,
		    softdep_deps,
		    softdep_accdeps,
		    secondary_writes,
		    secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	if (ffs_fsfail_cleanup(ump, allerror))
		allerror = 0;
	return (allerror);
}

int
ffs_vget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (ffs_vgetf(mp, ino, flags, vpp, 0));
}

int
ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	daddr_t dbn;
	int error;

	MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
	    (flags & LK_EXCLUSIVE) != 0);

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0)
		return (error);
	if (*vpp != NULL) {
		if ((ffs_flags & FFSV_REPLACE) == 0 ||
		    ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
		    !VN_IS_DOOMED(*vpp)))
			return (0);
		vgone(*vpp);
		vput(*vpp);
	}

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such rare case as simultaneous creation of vnode
	 * for same ino by different processes.  We just allow them to race
	 * and check later to decide who wins.  Let the race begin!
	 */

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
	if (error) {
		*vpp = NULL;
		uma_zfree_smr(uma_inode, ip);
		return (error);
	}
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	VN_LOCK_AREC(vp);
	vp->v_data = ip;
	vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
	ip->i_nextclustercg = -1;
	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
	ip->i_mode = 0;		/* ensure error cases below throw away vnode */
	cluster_init_vn(&ip->i_clusterw);
#ifdef DIAGNOSTIC
	ufs_init_trackers(ip);
#endif
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	if (ffs_flags & FFSV_FORCEINSMQ)
		vp->v_vflag |= VV_FORCEINSMQ;
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree_smr(uma_inode, ip);
		*vpp = NULL;
		return (error);
	}
	vp->v_vflag &= ~VV_FORCEINSMQ;
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0)
		return (error);
	if (*vpp != NULL) {
		/*
		 * Calls from ffs_valloc() (i.e., with FFSV_REPLACE set)
		 * operate on an empty inode, which must not be found by
		 * other threads until it is fully initialized.  The vnode
		 * for the empty inode must not be re-inserted into the
		 * hash by another thread after we removed it at the
		 * beginning.
		 */
		MPASS((ffs_flags & FFSV_REPLACE) == 0);
		return (0);
	}

	/* Read in the disk contents for the inode, copy into the inode. */
	dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
	error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize,
	    NULL, NULL, 0, NOCRED, 0, NULL, &bp);
	if (error != 0) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain.  With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (I_IS_UFS1(ip))
		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
	else
		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
	if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
		bqrelse(bp);
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
	    &vp);
	if (error) {
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one.  This should only happen on old filesystems.
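	 * A generation of zero is reserved, so arc4random() is retried
	 * until it yields a non-zero value.  On read-only mounts the new
	 * generation is kept in core but not written back to disk.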
	 */
	if (ip->i_gen == 0) {
		while (ip->i_gen == 0)
			ip->i_gen = arc4random();
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
			DIP_SET(ip, i_gen, ip->i_gen);
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vgone(vp);
			vput(vp);
			*vpp = NULL;
			return (error);
		}
	}
#endif

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
{
	struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
	    vpp, 0));
}

int
ffs_inotovp(struct mount *mp, ino_t ino, u_int64_t gen, int lflags,
    struct vnode **vpp, int ffs_flags)
{
	struct ufsmount *ump;
	struct vnode *nvp;
	struct inode *ip;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	u_int cg;
	int error;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	*vpp = NULL;

	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);

	/*
	 * Need to check if inode is initialized because UFS2 does lazy
	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
	 */
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		cg = ino_to_cg(fs, ino);
		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
		if (error != 0)
			return (error);
		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
			brelse(bp);
			return (ESTALE);
		}
		brelse(bp);
	}

	error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
	if (error != 0)
		return (error);

	ip = VTOI(nvp);
	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
		if (ip->i_mode == 0)
			vgone(nvp);
		vput(nvp);
		return (ESTALE);
	}

	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
	*vpp = nvp;
	return (0);
}

/*
 * Initialize the filesystem.
 */
static int
ffs_init(struct vfsconf *vfsp)
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(struct vfsconf *vfsp)
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	taskqueue_drain_all(taskqueue_thread);
	return (ret);
}

/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
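 * The ump and sbbp fields identify the mount and the buffer used to
 * serialize superblock writes; waitfor, suspended and error carry the
 * write mode in and the first write error back out.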
 */
struct devfd {
	struct ufsmount	*ump;
	struct buf	*sbbp;
	int		waitfor;
	int		suspended;
	int		error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}

/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself.  We need to do special checks for it.
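	 * These include repairing a stale fs_sblockloc left by older
	 * kernels, letting soft updates hook the write, and recomputing
	 * the superblock check hash on the outgoing copy.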
	 */
	bp = devfdp->sbbp;
	if (ffs_fsfail_cleanup(ump, devfdp->error))
		devfdp->error = 0;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	fs = (struct fs *)bp->b_data;
	ffs_oldfscompat_write(fs, ump);
	fs->fs_si = NULL;
	/* Recalculate the superblock hash */
	fs->fs_ckhash = ffs_calc_sbhash(fs);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree_smr(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
		softdep_handle_error(bp);
#endif

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * Mark the cylinder group buffer origbp dirty so that the
	 * failed write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed.  Clear B_IOSTARTED in case of error.
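	 * The background copy is discarded in every case; on error only
	 * the original buffer (origbp) retains the dirty state, via the
	 * BV_BKGRDERR flag set above.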
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirty bp on
	 * errors.  Doing so would dereference b_bufobj in
	 * bdirty()/reassignbuf(), but b_bufobj was cleared by
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous.  Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory.  If we have a page
	 * or buffer shortfall we can't do it.
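	 * Hence the vm_page_count_severe() and buf_dirty_count_severe()
	 * checks below, and the fall back to a normal write when
	 * geteblk() cannot supply a copy buffer without waiting.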
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies.  If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original.  The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes.  We have to recalculate
		 * its check hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}

static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	/*
	 * This is the bufobj strategy for the private VCHR vnodes
	 * used by FFS to access the underlying storage device.
	 * We override the default bufobj strategy and thus bypass
	 * VOP_STRATEGY() for these vnodes.
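	 * Because every write funnels through here, this is also where
	 * snapshot copy-on-write, soft updates buf_start() processing,
	 * and metadata check-hash updates are applied before the buffer
	 * is handed to g_vfs_strategy().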
	 */
	vp = bo2vnode(bo);
	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
	    bp->b_vp->v_rdev == NULL ||
	    bp->b_vp->v_rdev->si_mountpt == NULL ||
	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
	    ("ffs_geom_strategy() with wrong vp"));
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bp->b_flags &= ~B_BARRIER;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags &= ~B_BARRIER;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}
#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (u_int)(bp->b_xflags & BX_FSPRIV),
			    PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
		bp->b_xflags |= BX_CVTENXIO;
	g_vfs_strategy(bo, bp);
}

int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif /* SOFTUPDATES */
#endif /* DDB */