/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

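/*
 * Register the "ufs" filesystem type with the VFS; this single set of
 * vfs operations serves both UFS1 and UFS2 volumes.
 */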
VFS_SET(ufs_vfsops, ufs, VFCF_FILEREVINC);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		if (ffs_oldfscompat_inode_read(fs, ip->i_dp, time_second) &&
		    fs->fs_ronly == 0)
			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	if (ffs_oldfscompat_inode_read(fs, ip->i_dp, time_second) &&
	    fs->fs_ronly == 0)
		UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block.  Also check
	 * that it does not point to an inode block or a superblock.  Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}

/*
 * On first ENXIO error, initiate an asynchronous forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		if (ump->um_mountp == rootvnode->v_mount)
			panic("UFS: root fs would be forcibly unmounted");

		/*
		 * Queue an async forced unmount.
		 */
		vfs_ref(ump->um_mountp);
		dounmount(ump->um_mountp,
		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
		printf("UFS: forcibly unmounting %s from %s\n",
		    ump->um_mountp->mnt_stat.f_mntfromname,
		    ump->um_mountp->mnt_stat.f_mntonname);
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one. They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}

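/*
 * VFS mount entry point.  Handles new mounts, updates of existing mounts
 * (including read-only/read-write transitions), and snapshot creation
 * requests.
 */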
static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error, flags;
	int error1 __diagused;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);

	/*
	 * If this is a snapshot request, take the snapshot.
	 */
	if (mp->mnt_flag & MNT_SNAPSHOT) {
		if ((mp->mnt_flag & MNT_UPDATE) == 0)
			return (EINVAL);
		return (ffs_snapshot(mp, fspec));
	}

	/*
	 * Must not call namei() while owning busy ref.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		vfs_unbusy(mp);

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE_PNBUF(&ndp);
	if (!vn_isdisk_error(ndp.ni_vp, &error)) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * New mount
	 *
	 * We need the name for the mount point (also used for
	 * "last mounted on") copied in. If an error occurs,
	 * the mount point is discarded by the upper level code.
	 * Note that vfs_mount_alloc() populates f_mntonname for us.
	 */
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
			vrele(ndp.ni_vp);
			return (error);
		}
	} else {
		/*
		 * When updating, check whether changing from read-only to
		 * read/write; if there is no device name, that's all we do.
		 */
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;

		/*
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */
		if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(ndp.ni_vp);
		if (error)
			return (error);
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, 0)) != 0) {
			return (error);
		} else {
			/* ffs_reload replaces the superblock structure */
			fs = ump->um_fs;
		}
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				    (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				    (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n",
					    mp->mnt_stat.f_mntonname);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    mp->mnt_stat.f_mntonname,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}

	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
 *	   to allow secondary writers.
 *	4) invalidate all cached file data.
 *	5) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	int error;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	if ((error = ffs_sbget(devvp, &newfs, UFS_STDSB, 0, M_UFSMNT,
	    ffs_use_bread)) != 0)
		return (error);
	/*
	 * Replace our superblock with the new superblock. Preserve
	 * our read-only status.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	newfs->fs_ronly = fs->fs_ronly;
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	fs = VFSTOUFS(mp)->um_fs = newfs;
	ump->um_bsize = fs->fs_bsize;
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", mp->mnt_stat.f_mntonname,
		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	/*
	 * Step 3: If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
	 * to allow secondary writers.
	 */
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	int candelete, canspeedup;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	VOP_UNLOCK(devvp);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
		error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
	else
		error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
		    ffs_use_bread);
	if (error != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		    (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    mp->mnt_stat.f_mntonname);
		} else {
			vfs_mount_error(mp, "R/W mount on %s denied. "
			    "Filesystem is not clean - run fsck.%s",
			    mp->mnt_stat.f_mntonname,
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    mp->mnt_stat.f_mntonname,
			    (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", mp->mnt_stat.f_mntonname,
		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((uint64_t)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			if ((mp->mnt_flag & MNT_RDONLY) == 0)
				printf("WARNING: %s: GJOURNAL flag on fs "
				    "but no gjournal provider below\n",
				    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
		ump->um_check_blkno = ffs_check_blkno;
	else
		ump->um_check_blkno = NULL;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	sx_init(&ump->um_checkpath_lock, "uchpth");
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	ump->um_bsize = fs->fs_bsize;
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
		if (((ump->um_flags) & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_odevvp = odevvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
Grimes ump->um_quotas[i] = NULLVP; 1117516081f2SRobert Watson #ifdef UFS_EXTATTR 1118a64ed089SRobert Watson ufs_extattr_uepm_init(&ump->um_extattr); 1119a64ed089SRobert Watson #endif 11202b14f991SJulian Elischer /* 11212b14f991SJulian Elischer * Set FS local "last mounted on" information (NULL pad) 11222b14f991SJulian Elischer */ 112393373c42SSuleiman Souhlal bzero(fs->fs_fsmnt, MAXMNTLEN); 112493373c42SSuleiman Souhlal strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1125113db2ddSJeff Roberson mp->mnt_stat.f_iosize = fs->fs_bsize; 11262b14f991SJulian Elischer 11272b14f991SJulian Elischer if (mp->mnt_flag & MNT_ROOTFS) { 11282b14f991SJulian Elischer /* 11292b14f991SJulian Elischer * Root mount; update timestamp in mount structure. 11302b14f991SJulian Elischer * this will be used by the common root mount code 11312b14f991SJulian Elischer * to update the system clock. 11322b14f991SJulian Elischer */ 11332b14f991SJulian Elischer mp->mnt_time = fs->fs_time; 11342b14f991SJulian Elischer } 1135996c772fSJohn Dyson 1136996c772fSJohn Dyson if (ronly == 0) { 1137113db2ddSJeff Roberson fs->fs_mtime = time_second; 1138b1897c19SJulian Elischer if ((fs->fs_flags & FS_DOSOFTDEP) && 1139b1897c19SJulian Elischer (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1140fddd463dSKonstantin Belousov ffs_flushfiles(mp, FORCECLOSE, td); 1141b1897c19SJulian Elischer goto out; 1142b1897c19SJulian Elischer } 1143f2a2857bSKirk McKusick if (fs->fs_snapinum[0] != 0) 1144f2a2857bSKirk McKusick ffs_snapshot_mount(mp); 1145cf60e8e4SKirk McKusick fs->fs_fmod = 1; 1146996c772fSJohn Dyson fs->fs_clean = 0; 1147791dd2faSTor Egge (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1148996c772fSJohn Dyson } 1149d8d3d415SPoul-Henning Kamp /* 115098cbffd7SKonstantin Belousov * Initialize filesystem state information in mount struct. 1151d8d3d415SPoul-Henning Kamp */ 11526cf7bc60SRobert Watson MNT_ILOCK(mp); 1153bc2258daSAttilio Rao mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 11545f34e93cSMark Johnston MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 11556cf7bc60SRobert Watson MNT_IUNLOCK(mp); 1156516081f2SRobert Watson #ifdef UFS_EXTATTR 1157516081f2SRobert Watson #ifdef UFS_EXTATTR_AUTOSTART 11589de54ba5SRobert Watson /* 11599de54ba5SRobert Watson * 1160f5161237SRobert Watson * Auto-starting does the following: 11619de54ba5SRobert Watson * - check for /.attribute in the fs, and extattr_start if so 11629de54ba5SRobert Watson * - for each file in .attribute, enable that file with 11639de54ba5SRobert Watson * an attribute of the same name. 11649de54ba5SRobert Watson * Not clear how to report errors -- probably eat them. 11659de54ba5SRobert Watson * This would all happen while the filesystem was busy/not 11669de54ba5SRobert Watson * available, so would effectively be "atomic". 11679de54ba5SRobert Watson */ 1168b40ce416SJulian Elischer (void) ufs_extattr_autostart(mp, td); 1169516081f2SRobert Watson #endif /* !UFS_EXTATTR_AUTOSTART */ 1170516081f2SRobert Watson #endif /* !UFS_EXTATTR */ 1171df8bae1dSRodney W. Grimes return (0); 1172df8bae1dSRodney W. 
Grimes out: 1173dffce215SKirk McKusick if (fs != NULL) { 1174dffce215SKirk McKusick free(fs->fs_csp, M_UFSMNT); 117534816cb9SKirk McKusick free(fs->fs_si, M_UFSMNT); 1176dffce215SKirk McKusick free(fs, M_UFSMNT); 1177dffce215SKirk McKusick } 117843920011SPoul-Henning Kamp if (cp != NULL) { 117943920011SPoul-Henning Kamp g_topology_lock(); 11800d7935fdSAttilio Rao g_vfs_close(cp); 118143920011SPoul-Henning Kamp g_topology_unlock(); 118243920011SPoul-Henning Kamp } 11838df4bc48SKonstantin Belousov if (ump != NULL) { 11843ba649d7SJeff Roberson mtx_destroy(UFS_MTX(ump)); 11858df4bc48SKonstantin Belousov sx_destroy(&ump->um_checkpath_lock); 11861a60c7fcSPawel Jakub Dawidek if (mp->mnt_gjprovider != NULL) { 11871a60c7fcSPawel Jakub Dawidek free(mp->mnt_gjprovider, M_UFSMNT); 11881a60c7fcSPawel Jakub Dawidek mp->mnt_gjprovider = NULL; 11891a60c7fcSPawel Jakub Dawidek } 11902af934ccSKonstantin Belousov MPASS(ump->um_softdep == NULL); 1191df8bae1dSRodney W. Grimes free(ump, M_UFSMNT); 119277465d93SAlfred Perlstein mp->mnt_data = NULL; 1193df8bae1dSRodney W. Grimes } 1194f15ccf88SChuck Silvers BO_LOCK(&odevvp->v_bufobj); 1195f15ccf88SChuck Silvers odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1196f15ccf88SChuck Silvers BO_UNLOCK(&odevvp->v_bufobj); 1197c70b3cd2SKonstantin Belousov atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 119825809a01SKonstantin Belousov vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1199f15ccf88SChuck Silvers mntfs_freevp(devvp); 120049c4791cSEdward Tomasz Napierala dev_rel(dev); 1201df8bae1dSRodney W. Grimes return (error); 1202df8bae1dSRodney W. Grimes } 1203df8bae1dSRodney W. Grimes 1204dffce215SKirk McKusick /* 1205dffce215SKirk McKusick * A read function for use by filesystem-layer routines. 1206dffce215SKirk McKusick */ 1207dffce215SKirk McKusick static int 1208dffce215SKirk McKusick ffs_use_bread(void *devfd, off_t loc, void **bufp, int size) 1209dffce215SKirk McKusick { 1210dffce215SKirk McKusick struct buf *bp; 1211dffce215SKirk McKusick int error; 1212dffce215SKirk McKusick 1213efbf3964SKirk McKusick KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp)); 1214dffce215SKirk McKusick *bufp = malloc(size, M_UFSMNT, M_WAITOK); 1215dffce215SKirk McKusick if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED, 1216efbf3964SKirk McKusick &bp)) != 0) 1217dffce215SKirk McKusick return (error); 1218dffce215SKirk McKusick bcopy(bp->b_data, *bufp, size); 1219dffce215SKirk McKusick bp->b_flags |= B_INVAL | B_NOCACHE; 1220dffce215SKirk McKusick brelse(bp); 1221dffce215SKirk McKusick return (0); 1222dffce215SKirk McKusick } 1223dffce215SKirk McKusick 1224df8bae1dSRodney W. Grimes /* 1225df8bae1dSRodney W. Grimes * unmount system call 1226df8bae1dSRodney W. Grimes */ 1227adf41577SPoul-Henning Kamp static int 1228064e6b43SKirk McKusick ffs_unmount(struct mount *mp, int mntflags) 1229df8bae1dSRodney W. Grimes { 1230dfd233edSAttilio Rao struct thread *td; 123105f4ff5dSPoul-Henning Kamp struct ufsmount *ump = VFSTOUFS(mp); 123205f4ff5dSPoul-Henning Kamp struct fs *fs; 12336fecb4e4SKonstantin Belousov int error, flags, susp; 1234df86ccf6SKonstantin Belousov #ifdef UFS_EXTATTR 1235df86ccf6SKonstantin Belousov int e_restart; 1236df86ccf6SKonstantin Belousov #endif 1237df8bae1dSRodney W. Grimes 1238df8bae1dSRodney W. Grimes flags = 0; 1239dfd233edSAttilio Rao td = curthread; 12406fecb4e4SKonstantin Belousov fs = ump->um_fs; 124171f26429SKonstantin Belousov if (mntflags & MNT_FORCE) 1242df8bae1dSRodney W. 
Grimes flags |= FORCECLOSE; 12434ce90426SKonstantin Belousov susp = fs->fs_ronly == 0; 1244516081f2SRobert Watson #ifdef UFS_EXTATTR 1245b40ce416SJulian Elischer if ((error = ufs_extattr_stop(mp, td))) { 1246b2b0497aSRobert Watson if (error != EOPNOTSUPP) 1247b60ee81eSKirk McKusick printf("WARNING: unmount %s: ufs_extattr_stop " 1248b60ee81eSKirk McKusick "returned errno %d\n", mp->mnt_stat.f_mntonname, 1249b2b0497aSRobert Watson error); 1250df86ccf6SKonstantin Belousov e_restart = 0; 12517df97b61SRobert Watson } else { 12529de54ba5SRobert Watson ufs_extattr_uepm_destroy(&ump->um_extattr); 1253df86ccf6SKonstantin Belousov e_restart = 1; 12547df97b61SRobert Watson } 1255a64ed089SRobert Watson #endif 12566fecb4e4SKonstantin Belousov if (susp) { 1257895b3782SKonstantin Belousov error = vfs_write_suspend_umnt(mp); 1258895b3782SKonstantin Belousov if (error != 0) 1259895b3782SKonstantin Belousov goto fail1; 12606fecb4e4SKonstantin Belousov } 1261fddf7baeSKirk McKusick if (MOUNTEDSOFTDEP(mp)) 12623c140b2dSEdward Tomasz Napierala error = softdep_flushfiles(mp, flags, td); 12633c140b2dSEdward Tomasz Napierala else 12643c140b2dSEdward Tomasz Napierala error = ffs_flushfiles(mp, flags, td); 1265d79ff54bSChuck Silvers if (error != 0 && !ffs_fsfail_cleanup(ump, error)) 12666fecb4e4SKonstantin Belousov goto fail; 12673c140b2dSEdward Tomasz Napierala 12683ba649d7SJeff Roberson UFS_LOCK(ump); 12699ccb939eSKirk McKusick if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1270b60ee81eSKirk McKusick printf("WARNING: unmount %s: pending error: blocks %jd " 1271b60ee81eSKirk McKusick "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 12721c85e6a3SKirk McKusick fs->fs_pendinginodes); 12739ccb939eSKirk McKusick fs->fs_pendingblocks = 0; 12749ccb939eSKirk McKusick fs->fs_pendinginodes = 0; 12759ccb939eSKirk McKusick } 12763ba649d7SJeff Roberson UFS_UNLOCK(ump); 1277519e3c3bSKirk McKusick if (MOUNTEDSOFTDEP(mp)) 1278113db2ddSJeff Roberson softdep_unmount(mp); 12792af934ccSKonstantin Belousov MPASS(ump->um_softdep == NULL); 12809acea164SRobert Wing if (fs->fs_ronly == 0) { 12811a6a6610SKirk McKusick fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 
0 : 1; 1282791dd2faSTor Egge error = ffs_sbupdate(ump, MNT_WAIT, 0); 1283d79ff54bSChuck Silvers if (ffs_fsfail_cleanup(ump, error)) 1284d79ff54bSChuck Silvers error = 0; 1285d79ff54bSChuck Silvers if (error != 0 && !ffs_fsfail_cleanup(ump, error)) { 1286996c772fSJohn Dyson fs->fs_clean = 0; 12876fecb4e4SKonstantin Belousov goto fail; 1288996c772fSJohn Dyson } 1289e0e9c421SDavid Greenman } 1290ddd6b3fcSKonstantin Belousov if (susp) 1291ddd6b3fcSKonstantin Belousov vfs_write_resume(mp, VR_START_WRITE); 1292c79dff0fSKonstantin Belousov if (ump->um_trim_tq != NULL) { 12933dc5f8e1SChuck Silvers MPASS(ump->um_trim_inflight == 0); 1294c79dff0fSKonstantin Belousov taskqueue_free(ump->um_trim_tq); 12957e038bc2SKirk McKusick free (ump->um_trimhash, M_TRIM); 1296c79dff0fSKonstantin Belousov } 1297c34a5148SKonstantin Belousov vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 129843920011SPoul-Henning Kamp g_topology_lock(); 12990d7935fdSAttilio Rao g_vfs_close(ump->um_cp); 130043920011SPoul-Henning Kamp g_topology_unlock(); 1301f15ccf88SChuck Silvers BO_LOCK(&ump->um_odevvp->v_bufobj); 1302f15ccf88SChuck Silvers ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS; 1303f15ccf88SChuck Silvers BO_UNLOCK(&ump->um_odevvp->v_bufobj); 1304c70b3cd2SKonstantin Belousov atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1305f15ccf88SChuck Silvers mntfs_freevp(ump->um_devvp); 1306f15ccf88SChuck Silvers vrele(ump->um_odevvp); 130749c4791cSEdward Tomasz Napierala dev_rel(ump->um_dev); 13083ba649d7SJeff Roberson mtx_destroy(UFS_MTX(ump)); 13098df4bc48SKonstantin Belousov sx_destroy(&ump->um_checkpath_lock); 13101a60c7fcSPawel Jakub Dawidek if (mp->mnt_gjprovider != NULL) { 13111a60c7fcSPawel Jakub Dawidek free(mp->mnt_gjprovider, M_UFSMNT); 13121a60c7fcSPawel Jakub Dawidek mp->mnt_gjprovider = NULL; 13131a60c7fcSPawel Jakub Dawidek } 1314f55ff3f3SIan Dowse free(fs->fs_csp, M_UFSMNT); 131534816cb9SKirk McKusick free(fs->fs_si, M_UFSMNT); 1316df8bae1dSRodney W. Grimes free(fs, M_UFSMNT); 1317df8bae1dSRodney W. Grimes free(ump, M_UFSMNT); 131877465d93SAlfred Perlstein mp->mnt_data = NULL; 13194cbc378cSKonstantin Belousov if (td->td_su == mp) { 13204cbc378cSKonstantin Belousov td->td_su = NULL; 13214cbc378cSKonstantin Belousov vfs_rel(mp); 13224cbc378cSKonstantin Belousov } 1323df8bae1dSRodney W. Grimes return (error); 13246fecb4e4SKonstantin Belousov 13256fecb4e4SKonstantin Belousov fail: 1326ddd6b3fcSKonstantin Belousov if (susp) 1327ddd6b3fcSKonstantin Belousov vfs_write_resume(mp, VR_START_WRITE); 1328895b3782SKonstantin Belousov fail1: 1329df86ccf6SKonstantin Belousov #ifdef UFS_EXTATTR 1330df86ccf6SKonstantin Belousov if (e_restart) { 1331df86ccf6SKonstantin Belousov ufs_extattr_uepm_init(&ump->um_extattr); 1332df86ccf6SKonstantin Belousov #ifdef UFS_EXTATTR_AUTOSTART 1333df86ccf6SKonstantin Belousov (void) ufs_extattr_autostart(mp, td); 1334df86ccf6SKonstantin Belousov #endif 1335df86ccf6SKonstantin Belousov } 1336df86ccf6SKonstantin Belousov #endif 1337df86ccf6SKonstantin Belousov 13386fecb4e4SKonstantin Belousov return (error); 1339df8bae1dSRodney W. Grimes } 1340df8bae1dSRodney W. Grimes 1341df8bae1dSRodney W. Grimes /* 1342df8bae1dSRodney W. Grimes * Flush out all the files in a filesystem. 1343df8bae1dSRodney W. Grimes */ 134426f9a767SRodney W. Grimes int 1345064e6b43SKirk McKusick ffs_flushfiles(struct mount *mp, int flags, struct thread *td) 1346df8bae1dSRodney W. Grimes { 134705f4ff5dSPoul-Henning Kamp struct ufsmount *ump; 1348ba05dec5SKonstantin Belousov int qerror, error; 1349df8bae1dSRodney W. 
Grimes 1350df8bae1dSRodney W. Grimes ump = VFSTOUFS(mp); 1351ba05dec5SKonstantin Belousov qerror = 0; 1352df8bae1dSRodney W. Grimes #ifdef QUOTA 1353df8bae1dSRodney W. Grimes if (mp->mnt_flag & MNT_QUOTA) { 1354c1d9efcbSPoul-Henning Kamp int i; 1355f257b7a5SAlfred Perlstein error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1356c1d9efcbSPoul-Henning Kamp if (error) 1357df8bae1dSRodney W. Grimes return (error); 1358df8bae1dSRodney W. Grimes for (i = 0; i < MAXQUOTAS; i++) { 1359ba05dec5SKonstantin Belousov error = quotaoff(td, mp, i); 1360ba05dec5SKonstantin Belousov if (error != 0) { 1361ba05dec5SKonstantin Belousov if ((flags & EARLYFLUSH) == 0) 1362ba05dec5SKonstantin Belousov return (error); 1363ba05dec5SKonstantin Belousov else 1364ba05dec5SKonstantin Belousov qerror = error; 1365df8bae1dSRodney W. Grimes } 1366ba05dec5SKonstantin Belousov } 1367ba05dec5SKonstantin Belousov 1368df8bae1dSRodney W. Grimes /* 1369ba05dec5SKonstantin Belousov * Here we fall through to vflush again to ensure that 1370ba05dec5SKonstantin Belousov * we have gotten rid of all the system vnodes, unless 1371ba05dec5SKonstantin Belousov * quotas must not be closed. 1372df8bae1dSRodney W. Grimes */ 1373df8bae1dSRodney W. Grimes } 1374df8bae1dSRodney W. Grimes #endif 137576b05e3eSKonstantin Belousov /* devvp is not locked there */ 1376e6e370a7SJeff Roberson if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1377f257b7a5SAlfred Perlstein if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1378f2a2857bSKirk McKusick return (error); 1379f2a2857bSKirk McKusick ffs_snapshot_unmount(mp); 138095e7a3c3STor Egge flags |= FORCECLOSE; 1381f2a2857bSKirk McKusick /* 1382f2a2857bSKirk McKusick * Here we fall through to vflush again to ensure 1383f2a2857bSKirk McKusick * that we have gotten rid of all the system vnodes. 1384f2a2857bSKirk McKusick */ 1385f2a2857bSKirk McKusick } 1386ba05dec5SKonstantin Belousov 1387b1897c19SJulian Elischer /* 1388ba05dec5SKonstantin Belousov * Do not close system files if quotas were not closed, to be 1389ba05dec5SKonstantin Belousov * able to sync the remaining dquots. The freeblks softupdate 1390ba05dec5SKonstantin Belousov * workitems might hold a reference on a dquot, preventing 1391ba05dec5SKonstantin Belousov * quotaoff() from completing. Next round of 1392ba05dec5SKonstantin Belousov * softdep_flushworklist() iteration should process the 1393ba05dec5SKonstantin Belousov * blockers, allowing the next run of quotaoff() to finally 1394ba05dec5SKonstantin Belousov * flush held dquots. 1395ba05dec5SKonstantin Belousov * 1396ba05dec5SKonstantin Belousov * Otherwise, flush all the files. 1397b1897c19SJulian Elischer */ 1398ba05dec5SKonstantin Belousov if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1399b1897c19SJulian Elischer return (error); 1400ba05dec5SKonstantin Belousov 1401b1897c19SJulian Elischer /* 14023dc5f8e1SChuck Silvers * If this is a forcible unmount and there were any files that 14033dc5f8e1SChuck Silvers * were unlinked but still open, then vflush() will have 14043dc5f8e1SChuck Silvers * truncated and freed those files, which might have started 14053dc5f8e1SChuck Silvers * some trim work. Wait here for any trims to complete 14063dc5f8e1SChuck Silvers * and process the blkfrees which follow the trims. 14073dc5f8e1SChuck Silvers * This may create more dirty devvp buffers and softdep deps. 
14083dc5f8e1SChuck Silvers */ 14093dc5f8e1SChuck Silvers if (ump->um_trim_tq != NULL) { 14103dc5f8e1SChuck Silvers while (ump->um_trim_inflight != 0) 14113dc5f8e1SChuck Silvers pause("ufsutr", hz); 14123dc5f8e1SChuck Silvers taskqueue_drain_all(ump->um_trim_tq); 14133dc5f8e1SChuck Silvers } 14143dc5f8e1SChuck Silvers 14153dc5f8e1SChuck Silvers /* 1416b1897c19SJulian Elischer * Flush filesystem metadata. 1417b1897c19SJulian Elischer */ 1418cb05b60aSAttilio Rao vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 14198df6bac4SPoul-Henning Kamp error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1420b249ce48SMateusz Guzik VOP_UNLOCK(ump->um_devvp); 1421df8bae1dSRodney W. Grimes return (error); 1422df8bae1dSRodney W. Grimes } 1423df8bae1dSRodney W. Grimes 1424df8bae1dSRodney W. Grimes /* 1425df8bae1dSRodney W. Grimes * Get filesystem statistics. 1426df8bae1dSRodney W. Grimes */ 1427adf41577SPoul-Henning Kamp static int 1428064e6b43SKirk McKusick ffs_statfs(struct mount *mp, struct statfs *sbp) 1429df8bae1dSRodney W. Grimes { 143005f4ff5dSPoul-Henning Kamp struct ufsmount *ump; 143105f4ff5dSPoul-Henning Kamp struct fs *fs; 1432df8bae1dSRodney W. Grimes 1433df8bae1dSRodney W. Grimes ump = VFSTOUFS(mp); 1434df8bae1dSRodney W. Grimes fs = ump->um_fs; 14351c85e6a3SKirk McKusick if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1436df8bae1dSRodney W. Grimes panic("ffs_statfs"); 1437fde81c7dSKirk McKusick sbp->f_version = STATFS_VERSION; 1438df8bae1dSRodney W. Grimes sbp->f_bsize = fs->fs_fsize; 1439df8bae1dSRodney W. Grimes sbp->f_iosize = fs->fs_bsize; 1440df8bae1dSRodney W. Grimes sbp->f_blocks = fs->fs_dsize; 14413ba649d7SJeff Roberson UFS_LOCK(ump); 1442df8bae1dSRodney W. Grimes sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 14439ccb939eSKirk McKusick fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 14449ccb939eSKirk McKusick sbp->f_bavail = freespace(fs, fs->fs_minfree) + 14459ccb939eSKirk McKusick dbtofsb(fs, fs->fs_pendingblocks); 14461dc349abSEd Maste sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; 14479ccb939eSKirk McKusick sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 14483ba649d7SJeff Roberson UFS_UNLOCK(ump); 1449a96da1c3SConrad Meyer sbp->f_namemax = UFS_MAXNAMLEN; 1450df8bae1dSRodney W. Grimes return (0); 1451df8bae1dSRodney W. Grimes } 1452df8bae1dSRodney W. Grimes 14531eabd967SKonstantin Belousov static bool 14541eabd967SKonstantin Belousov sync_doupdate(struct inode *ip) 14551eabd967SKonstantin Belousov { 14561eabd967SKonstantin Belousov 14571eabd967SKonstantin Belousov return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 14581eabd967SKonstantin Belousov IN_UPDATE)) != 0); 14591eabd967SKonstantin Belousov } 14601eabd967SKonstantin Belousov 146180663cadSMateusz Guzik static int 146280663cadSMateusz Guzik ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused) 146380663cadSMateusz Guzik { 146480663cadSMateusz Guzik struct inode *ip; 146580663cadSMateusz Guzik 146680663cadSMateusz Guzik /* 146780663cadSMateusz Guzik * Flags are safe to access because ->v_data invalidation 146880663cadSMateusz Guzik * is held off by listmtx. 
146980663cadSMateusz Guzik */ 147080663cadSMateusz Guzik if (vp->v_type == VNON) 147180663cadSMateusz Guzik return (false); 147280663cadSMateusz Guzik ip = VTOI(vp); 147380663cadSMateusz Guzik if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) 147480663cadSMateusz Guzik return (false); 147580663cadSMateusz Guzik return (true); 147680663cadSMateusz Guzik } 147780663cadSMateusz Guzik 1478df8bae1dSRodney W. Grimes /* 1479a988a5c6SKonstantin Belousov * For a lazy sync, we only care about access times, quotas and the 1480a988a5c6SKonstantin Belousov * superblock. Other filesystem changes are already converted to 1481a988a5c6SKonstantin Belousov * cylinder group blocks or inode blocks updates and are written to 1482a988a5c6SKonstantin Belousov * disk by syncer. 1483a988a5c6SKonstantin Belousov */ 1484a988a5c6SKonstantin Belousov static int 1485064e6b43SKirk McKusick ffs_sync_lazy(struct mount *mp) 1486a988a5c6SKonstantin Belousov { 1487a988a5c6SKonstantin Belousov struct vnode *mvp, *vp; 1488a988a5c6SKonstantin Belousov struct inode *ip; 1489a988a5c6SKonstantin Belousov int allerror, error; 1490a988a5c6SKonstantin Belousov 1491a988a5c6SKonstantin Belousov allerror = 0; 14920297c138SKirk McKusick if ((mp->mnt_flag & MNT_NOATIME) != 0) { 14930297c138SKirk McKusick #ifdef QUOTA 14940297c138SKirk McKusick qsync(mp); 14950297c138SKirk McKusick #endif 14960297c138SKirk McKusick goto sbupdate; 14970297c138SKirk McKusick } 149880663cadSMateusz Guzik MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) { 149971469bb3SKirk McKusick if (vp->v_type == VNON) { 1500a988a5c6SKonstantin Belousov VI_UNLOCK(vp); 1501a988a5c6SKonstantin Belousov continue; 1502a988a5c6SKonstantin Belousov } 1503a988a5c6SKonstantin Belousov ip = VTOI(vp); 1504a988a5c6SKonstantin Belousov 1505a988a5c6SKonstantin Belousov /* 1506a988a5c6SKonstantin Belousov * The IN_ACCESS flag is converted to IN_MODIFIED by 1507a988a5c6SKonstantin Belousov * ufs_close() and ufs_getattr() by the calls to 1508ea573a50SKonstantin Belousov * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1509ea573a50SKonstantin Belousov * Test also all the other timestamp flags too, to pick up 1510ea573a50SKonstantin Belousov * any other cases that could be missed. 1511a988a5c6SKonstantin Belousov */ 15121eabd967SKonstantin Belousov if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1513a988a5c6SKonstantin Belousov VI_UNLOCK(vp); 1514a988a5c6SKonstantin Belousov continue; 1515a988a5c6SKonstantin Belousov } 1516a92a971bSMateusz Guzik if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0) 1517a988a5c6SKonstantin Belousov continue; 15180297c138SKirk McKusick #ifdef QUOTA 15190297c138SKirk McKusick qsyncvp(vp); 15200297c138SKirk McKusick #endif 15211eabd967SKonstantin Belousov if (sync_doupdate(ip)) 1522a988a5c6SKonstantin Belousov error = ffs_update(vp, 0); 1523a988a5c6SKonstantin Belousov if (error != 0) 1524a988a5c6SKonstantin Belousov allerror = error; 1525a988a5c6SKonstantin Belousov vput(vp); 1526a988a5c6SKonstantin Belousov } 15270297c138SKirk McKusick sbupdate: 1528a988a5c6SKonstantin Belousov if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1529a988a5c6SKonstantin Belousov (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1530a988a5c6SKonstantin Belousov allerror = error; 1531a988a5c6SKonstantin Belousov return (allerror); 1532a988a5c6SKonstantin Belousov } 1533a988a5c6SKonstantin Belousov 1534a988a5c6SKonstantin Belousov /* 1535df8bae1dSRodney W. 
Grimes * Go through the disk queues to initiate sandbagged IO; 1536df8bae1dSRodney W. Grimes * go through the inodes to write those that have been modified; 1537df8bae1dSRodney W. Grimes * initiate the writing of the super block if it has been modified. 1538df8bae1dSRodney W. Grimes * 1539a988a5c6SKonstantin Belousov * Note: we are always called with the filesystem marked busy using 1540a988a5c6SKonstantin Belousov * vfs_busy(). 1541df8bae1dSRodney W. Grimes */ 1542adf41577SPoul-Henning Kamp static int 1543064e6b43SKirk McKusick ffs_sync(struct mount *mp, int waitfor) 1544df8bae1dSRodney W. Grimes { 154582be0a5aSTor Egge struct vnode *mvp, *vp, *devvp; 1546dfd233edSAttilio Rao struct thread *td; 1547996c772fSJohn Dyson struct inode *ip; 1548996c772fSJohn Dyson struct ufsmount *ump = VFSTOUFS(mp); 1549996c772fSJohn Dyson struct fs *fs; 155069baeadcSKonstantin Belousov int error, count, lockreq, allerror = 0; 1551791dd2faSTor Egge int suspend; 1552791dd2faSTor Egge int suspended; 1553791dd2faSTor Egge int secondary_writes; 1554791dd2faSTor Egge int secondary_accwrites; 1555791dd2faSTor Egge int softdep_deps; 1556791dd2faSTor Egge int softdep_accdeps; 1557156cb265SPoul-Henning Kamp struct bufobj *bo; 1558df8bae1dSRodney W. Grimes 155919c87af0SKirk McKusick suspend = 0; 156019c87af0SKirk McKusick suspended = 0; 1561dfd233edSAttilio Rao td = curthread; 1562df8bae1dSRodney W. Grimes fs = ump->um_fs; 15639acea164SRobert Wing if (fs->fs_fmod != 0 && fs->fs_ronly != 0) 1564b60ee81eSKirk McKusick panic("%s: ffs_sync: modification on read-only filesystem", 1565b60ee81eSKirk McKusick fs->fs_fsmnt); 15664af9f77eSKonstantin Belousov if (waitfor == MNT_LAZY) { 15674af9f77eSKonstantin Belousov if (!rebooting) 1568a988a5c6SKonstantin Belousov return (ffs_sync_lazy(mp)); 15694af9f77eSKonstantin Belousov waitfor = MNT_NOWAIT; 15704af9f77eSKonstantin Belousov } 1571a988a5c6SKonstantin Belousov 157219c87af0SKirk McKusick /* 1573df8bae1dSRodney W. Grimes * Write back each (modified) inode. 1574df8bae1dSRodney W. Grimes */ 1575245df27cSMatthew Dillon lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1576791dd2faSTor Egge if (waitfor == MNT_SUSPEND) { 1577791dd2faSTor Egge suspend = 1; 1578791dd2faSTor Egge waitfor = MNT_WAIT; 1579791dd2faSTor Egge } 158069baeadcSKonstantin Belousov if (waitfor == MNT_WAIT) 1581245df27cSMatthew Dillon lockreq = LK_EXCLUSIVE; 15824d9b2ed3SMateusz Guzik lockreq |= LK_INTERLOCK; 1583df8bae1dSRodney W. Grimes loop: 1584791dd2faSTor Egge /* Grab snapshot of secondary write counts */ 158571469bb3SKirk McKusick MNT_ILOCK(mp); 1586791dd2faSTor Egge secondary_writes = mp->mnt_secondary_writes; 1587791dd2faSTor Egge secondary_accwrites = mp->mnt_secondary_accwrites; 158871469bb3SKirk McKusick MNT_IUNLOCK(mp); 1589791dd2faSTor Egge 1590791dd2faSTor Egge /* Grab snapshot of softdep dependency counts */ 1591791dd2faSTor Egge softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1592791dd2faSTor Egge 159371469bb3SKirk McKusick MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1594245df27cSMatthew Dillon /* 1595e0c17408SKonstantin Belousov * Depend on the vnode interlock to keep things stable enough 1596245df27cSMatthew Dillon * for a quick test. Since there might be hundreds of 1597245df27cSMatthew Dillon * thousands of vnodes, we cannot afford even a subroutine 1598245df27cSMatthew Dillon * call unless there's a good chance that we have work to do. 
1599245df27cSMatthew Dillon */ 160071469bb3SKirk McKusick if (vp->v_type == VNON) { 16012f05568aSJeff Roberson VI_UNLOCK(vp); 16022f05568aSJeff Roberson continue; 16032f05568aSJeff Roberson } 1604df8bae1dSRodney W. Grimes ip = VTOI(vp); 160571469bb3SKirk McKusick if ((ip->i_flag & 1606cf60e8e4SKirk McKusick (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 160771469bb3SKirk McKusick vp->v_bufobj.bo_dirty.bv_cnt == 0) { 16082f05568aSJeff Roberson VI_UNLOCK(vp); 1609df8bae1dSRodney W. Grimes continue; 1610996c772fSJohn Dyson } 1611a92a971bSMateusz Guzik if ((error = vget(vp, lockreq)) != 0) { 16124d9b2ed3SMateusz Guzik if (error == ENOENT) { 161371469bb3SKirk McKusick MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1614df8bae1dSRodney W. Grimes goto loop; 161582be0a5aSTor Egge } 16162f05568aSJeff Roberson continue; 16172f05568aSJeff Roberson } 16180297c138SKirk McKusick #ifdef QUOTA 16190297c138SKirk McKusick qsyncvp(vp); 16200297c138SKirk McKusick #endif 16218a1509e4SKonstantin Belousov for (;;) { 16228a1509e4SKonstantin Belousov error = ffs_syncvnode(vp, waitfor, 0); 16238a1509e4SKonstantin Belousov if (error == ERELOOKUP) 16248a1509e4SKonstantin Belousov continue; 16258a1509e4SKonstantin Belousov if (error != 0) 1626df8bae1dSRodney W. Grimes allerror = error; 16278a1509e4SKonstantin Belousov break; 16288a1509e4SKonstantin Belousov } 162941d4783dSJeff Roberson vput(vp); 1630245df27cSMatthew Dillon } 1631df8bae1dSRodney W. Grimes /* 1632df8bae1dSRodney W. Grimes * Force stale filesystem control information to be flushed. 1633df8bae1dSRodney W. Grimes */ 16344af9f77eSKonstantin Belousov if (waitfor == MNT_WAIT || rebooting) { 1635b40ce416SJulian Elischer if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 16369b971133SKirk McKusick allerror = error; 1637d79ff54bSChuck Silvers if (ffs_fsfail_cleanup(ump, allerror)) 1638d79ff54bSChuck Silvers allerror = 0; 16399b971133SKirk McKusick /* Flushed work items may create new vnodes to clean */ 164071469bb3SKirk McKusick if (allerror == 0 && count) 16419b971133SKirk McKusick goto loop; 16429b971133SKirk McKusick } 164319c87af0SKirk McKusick 1644112f7372SKirk McKusick devvp = ump->um_devvp; 1645156cb265SPoul-Henning Kamp bo = &devvp->v_bufobj; 1646698b1a66SJeff Roberson BO_LOCK(bo); 164719c87af0SKirk McKusick if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1648698b1a66SJeff Roberson BO_UNLOCK(bo); 1649698b1a66SJeff Roberson vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 16504af9f77eSKonstantin Belousov error = VOP_FSYNC(devvp, waitfor, td); 1651b249ce48SMateusz Guzik VOP_UNLOCK(devvp); 16524af9f77eSKonstantin Belousov if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 16534af9f77eSKonstantin Belousov error = ffs_sbupdate(ump, waitfor, 0); 16544af9f77eSKonstantin Belousov if (error != 0) 16554af9f77eSKonstantin Belousov allerror = error; 1656d79ff54bSChuck Silvers if (ffs_fsfail_cleanup(ump, allerror)) 1657d79ff54bSChuck Silvers allerror = 0; 165871469bb3SKirk McKusick if (allerror == 0 && waitfor == MNT_WAIT) 1659112f7372SKirk McKusick goto loop; 1660791dd2faSTor Egge } else if (suspend != 0) { 1661791dd2faSTor Egge if (softdep_check_suspend(mp, 1662791dd2faSTor Egge devvp, 1663791dd2faSTor Egge softdep_deps, 1664791dd2faSTor Egge softdep_accdeps, 1665791dd2faSTor Egge secondary_writes, 166671469bb3SKirk McKusick secondary_accwrites) != 0) { 166771469bb3SKirk McKusick MNT_IUNLOCK(mp); 1668791dd2faSTor Egge goto loop; /* More work needed */ 166971469bb3SKirk McKusick } 1670791dd2faSTor Egge mtx_assert(MNT_MTX(mp), MA_OWNED); 
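 /*
  * (Added note, not in the annotated revision history above:)
  * softdep_check_suspend() reported that no further flushing is
  * needed, so the suspension can complete: mark the mount suspended
  * below while the mount interlock is still held.
  */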
1671ca2fa807STor Egge mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1672791dd2faSTor Egge MNT_IUNLOCK(mp); 1673791dd2faSTor Egge suspended = 1; 1674112f7372SKirk McKusick } else 1675698b1a66SJeff Roberson BO_UNLOCK(bo); 1676996c772fSJohn Dyson /* 1677996c772fSJohn Dyson * Write back modified superblock. 1678996c772fSJohn Dyson */ 1679791dd2faSTor Egge if (fs->fs_fmod != 0 && 1680791dd2faSTor Egge (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1681996c772fSJohn Dyson allerror = error; 1682d79ff54bSChuck Silvers if (ffs_fsfail_cleanup(ump, allerror)) 1683d79ff54bSChuck Silvers allerror = 0; 1684df8bae1dSRodney W. Grimes return (allerror); 1685df8bae1dSRodney W. Grimes } 1686df8bae1dSRodney W. Grimes 1687df8bae1dSRodney W. Grimes int 1688064e6b43SKirk McKusick ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) 1689df8bae1dSRodney W. Grimes { 16907b7ed832SKonstantin Belousov return (ffs_vgetf(mp, ino, flags, vpp, 0)); 16917b7ed832SKonstantin Belousov } 16927b7ed832SKonstantin Belousov 16937b7ed832SKonstantin Belousov int 1694064e6b43SKirk McKusick ffs_vgetf(struct mount *mp, 1695064e6b43SKirk McKusick ino_t ino, 1696064e6b43SKirk McKusick int flags, 1697064e6b43SKirk McKusick struct vnode **vpp, 1698064e6b43SKirk McKusick int ffs_flags) 16997b7ed832SKonstantin Belousov { 1700996c772fSJohn Dyson struct fs *fs; 1701996c772fSJohn Dyson struct inode *ip; 1702df8bae1dSRodney W. Grimes struct ufsmount *ump; 1703df8bae1dSRodney W. Grimes struct buf *bp; 1704df8bae1dSRodney W. Grimes struct vnode *vp; 1705d79ff54bSChuck Silvers daddr_t dbn; 1706f576a00dSSemen Ustimenko int error; 1707df8bae1dSRodney W. Grimes 1708f16c26b1SKonstantin Belousov MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 || 1709f16c26b1SKonstantin Belousov (flags & LK_EXCLUSIVE) != 0); 171016040222SKonstantin Belousov 171151f5ce0cSPoul-Henning Kamp error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 171216040222SKonstantin Belousov if (error != 0) 171314bc0685SPoul-Henning Kamp return (error); 171416040222SKonstantin Belousov if (*vpp != NULL) { 1715f16c26b1SKonstantin Belousov if ((ffs_flags & FFSV_REPLACE) == 0 || 1716f16c26b1SKonstantin Belousov ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 || 1717f16c26b1SKonstantin Belousov !VN_IS_DOOMED(*vpp))) 171816040222SKonstantin Belousov return (0); 171916040222SKonstantin Belousov vgone(*vpp); 172016040222SKonstantin Belousov vput(*vpp); 172116040222SKonstantin Belousov } 1722f576a00dSSemen Ustimenko 1723f576a00dSSemen Ustimenko /* 1724d6919865SJeff Roberson * We must promote to an exclusive lock for vnode creation. This 1725d6919865SJeff Roberson * can happen if lookup is passed LOCKSHARED. 1726d6919865SJeff Roberson */ 1727d6919865SJeff Roberson if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1728d6919865SJeff Roberson flags &= ~LK_TYPE_MASK; 1729d6919865SJeff Roberson flags |= LK_EXCLUSIVE; 1730d6919865SJeff Roberson } 1731d6919865SJeff Roberson 1732d6919865SJeff Roberson /* 173313866b3fSSemen Ustimenko * We do not lock vnode creation as it is believed to be too 1734f576a00dSSemen Ustimenko * expensive for such rare case as simultaneous creation of vnode 1735f576a00dSSemen Ustimenko * for same ino by different processes. We just allow them to race 1736f576a00dSSemen Ustimenko * and check later to decide who wins. Let the race begin! 
1737f576a00dSSemen Ustimenko */ 173814bc0685SPoul-Henning Kamp 173914bc0685SPoul-Henning Kamp ump = VFSTOUFS(mp); 174014bc0685SPoul-Henning Kamp fs = ump->um_fs; 17419d5a594fSMateusz Guzik ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO); 17422f9bae59SDavid Greenman 1743df8bae1dSRodney W. Grimes /* Allocate a new vnode/inode. */ 174443a993bbSKirk McKusick error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 174543a993bbSKirk McKusick &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1746c9671602SPoul-Henning Kamp if (error) { 1747df8bae1dSRodney W. Grimes *vpp = NULL; 17489d5a594fSMateusz Guzik uma_zfree_smr(uma_inode, ip); 1749df8bae1dSRodney W. Grimes return (error); 1750df8bae1dSRodney W. Grimes } 175167e87166SBoris Popov /* 175233fc3625SJohn Baldwin * FFS supports recursive locking. 175367e87166SBoris Popov */ 1754303d3ae7SKonstantin Belousov lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL); 1755628f51d2SAttilio Rao VN_LOCK_AREC(vp); 1756df8bae1dSRodney W. Grimes vp->v_data = ip; 17575d9d81e7SPoul-Henning Kamp vp->v_bufobj.bo_bsize = fs->fs_bsize; 1758df8bae1dSRodney W. Grimes ip->i_vnode = vp; 17591c85e6a3SKirk McKusick ip->i_ump = ump; 1760df8bae1dSRodney W. Grimes ip->i_number = ino; 1761e65f5a4eSKonstantin Belousov ip->i_ea_refs = 0; 176274a87c38SKirk McKusick ip->i_nextclustercg = -1; 1763e1db6897SKonstantin Belousov ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; 176472d28f97SKirk McKusick ip->i_mode = 0; /* ensure error cases below throw away vnode */ 17652bfd8992SKonstantin Belousov cluster_init_vn(&ip->i_clusterw); 176661846fc4SKonstantin Belousov #ifdef DIAGNOSTIC 176761846fc4SKonstantin Belousov ufs_init_trackers(ip); 176861846fc4SKonstantin Belousov #endif 1769df8bae1dSRodney W. Grimes #ifdef QUOTA 1770c1d9efcbSPoul-Henning Kamp { 1771c1d9efcbSPoul-Henning Kamp int i; 1772df8bae1dSRodney W. Grimes for (i = 0; i < MAXQUOTAS; i++) 1773df8bae1dSRodney W. Grimes ip->i_dquot[i] = NODQUOT; 1774c1d9efcbSPoul-Henning Kamp } 1775df8bae1dSRodney W. Grimes #endif 1776df8bae1dSRodney W. Grimes 17777b7ed832SKonstantin Belousov if (ffs_flags & FFSV_FORCEINSMQ) 17787b7ed832SKonstantin Belousov vp->v_vflag |= VV_FORCEINSMQ; 177961b9d89fSTor Egge error = insmntque(vp, mp); 178061b9d89fSTor Egge if (error != 0) { 17819d5a594fSMateusz Guzik uma_zfree_smr(uma_inode, ip); 178261b9d89fSTor Egge *vpp = NULL; 178361b9d89fSTor Egge return (error); 178461b9d89fSTor Egge } 17857b7ed832SKonstantin Belousov vp->v_vflag &= ~VV_FORCEINSMQ; 1786a80d8caaSPawel Jakub Dawidek error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 178716040222SKonstantin Belousov if (error != 0) 1788f576a00dSSemen Ustimenko return (error); 178916040222SKonstantin Belousov if (*vpp != NULL) { 179016040222SKonstantin Belousov /* 179116040222SKonstantin Belousov * Calls from ffs_valloc() (i.e. FFSV_REPLACE set) 179216040222SKonstantin Belousov * operate on empty inode, which must not be found by 179316040222SKonstantin Belousov * other threads until fully filled. Vnode for empty 179416040222SKonstantin Belousov * inode must be not re-inserted on the hash by other 179516040222SKonstantin Belousov * thread, after removal by us at the beginning. 
179616040222SKonstantin Belousov */ 179716040222SKonstantin Belousov MPASS((ffs_flags & FFSV_REPLACE) == 0); 179816040222SKonstantin Belousov return (0); 179916040222SKonstantin Belousov } 18006b9d4fbbSKirk McKusick if (I_IS_UFS1(ip)) 18016b9d4fbbSKirk McKusick ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 18026b9d4fbbSKirk McKusick else 18036b9d4fbbSKirk McKusick ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1804f576a00dSSemen Ustimenko 18056b9d4fbbSKirk McKusick if ((ffs_flags & FFSV_NEWINODE) != 0) { 18066b9d4fbbSKirk McKusick /* New inode, just zero out its contents. */ 18076b9d4fbbSKirk McKusick if (I_IS_UFS1(ip)) 18086b9d4fbbSKirk McKusick memset(ip->i_din1, 0, sizeof(struct ufs1_dinode)); 18096b9d4fbbSKirk McKusick else 18106b9d4fbbSKirk McKusick memset(ip->i_din2, 0, sizeof(struct ufs2_dinode)); 18116b9d4fbbSKirk McKusick } else { 18126b9d4fbbSKirk McKusick /* Read the disk contents for the inode, copy into the inode. */ 1813d79ff54bSChuck Silvers dbn = fsbtodb(fs, ino_to_fsba(fs, ino)); 18146b9d4fbbSKirk McKusick error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, 18156b9d4fbbSKirk McKusick (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp); 1816d79ff54bSChuck Silvers if (error != 0) { 1817df8bae1dSRodney W. Grimes /* 18186b9d4fbbSKirk McKusick * The inode does not contain anything useful, so it 18196b9d4fbbSKirk McKusick * would be misleading to leave it on its hash chain. 18206b9d4fbbSKirk McKusick * With mode still zero, it will be unlinked and 18216b9d4fbbSKirk McKusick * returned to the free list by vput(). 1822df8bae1dSRodney W. Grimes */ 18236c44a3e0SMateusz Guzik vgone(vp); 1824bd7e5f99SJohn Dyson vput(vp); 1825df8bae1dSRodney W. Grimes *vpp = NULL; 1826df8bae1dSRodney W. Grimes return (error); 1827df8bae1dSRodney W. Grimes } 18289fc5d538SKirk McKusick if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) { 18299fc5d538SKirk McKusick bqrelse(bp); 18306c44a3e0SMateusz Guzik vgone(vp); 18319fc5d538SKirk McKusick vput(vp); 18329fc5d538SKirk McKusick *vpp = NULL; 18339fc5d538SKirk McKusick return (error); 18349fc5d538SKirk McKusick } 18356b9d4fbbSKirk McKusick bqrelse(bp); 18366b9d4fbbSKirk McKusick } 1837fd97fa64SKonstantin Belousov if (DOINGSOFTDEP(vp) && (!fs->fs_ronly || 1838fd97fa64SKonstantin Belousov (ffs_flags & FFSV_FORCEINODEDEP) != 0)) 1839b1897c19SJulian Elischer softdep_load_inodeblock(ip); 1840b1897c19SJulian Elischer else 1841b1897c19SJulian Elischer ip->i_effnlink = ip->i_nlink; 1842df8bae1dSRodney W. Grimes 1843df8bae1dSRodney W. Grimes /* 1844df8bae1dSRodney W. Grimes * Initialize the vnode from the inode, check for aliases. 1845df8bae1dSRodney W. Grimes * Note that the underlying vnode may have changed. 1846df8bae1dSRodney W. Grimes */ 1847e1db6897SKonstantin Belousov error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, 1848e1db6897SKonstantin Belousov &vp); 1849c9671602SPoul-Henning Kamp if (error) { 18506c44a3e0SMateusz Guzik vgone(vp); 1851df8bae1dSRodney W. Grimes vput(vp); 1852df8bae1dSRodney W. Grimes *vpp = NULL; 1853df8bae1dSRodney W. Grimes return (error); 1854df8bae1dSRodney W. Grimes } 1855de68347bSPoul-Henning Kamp 1856df8bae1dSRodney W. Grimes /* 18575c24d6eeSPoul-Henning Kamp * Finish inode initialization. 1858df8bae1dSRodney W. Grimes */ 185933fc3625SJohn Baldwin if (vp->v_type != VFIFO) { 186033fc3625SJohn Baldwin /* FFS supports shared locking for all files except fifos. */ 186133fc3625SJohn Baldwin VN_LOCK_ASHARE(vp); 186233fc3625SJohn Baldwin } 1863de68347bSPoul-Henning Kamp 1864df8bae1dSRodney W. 
Grimes /* 1865df8bae1dSRodney W. Grimes * Set up a generation number for this inode if it does not 1866df8bae1dSRodney W. Grimes * already have one. This should only happen on old filesystems. 1867df8bae1dSRodney W. Grimes */ 1868df8bae1dSRodney W. Grimes if (ip->i_gen == 0) { 186957d2ac2fSKevin Lo while (ip->i_gen == 0) 187057d2ac2fSKevin Lo ip->i_gen = arc4random(); 18711c85e6a3SKirk McKusick if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 1872ac4ec141SMateusz Guzik UFS_INODE_SET_FLAG(ip, IN_MODIFIED); 1873b403319bSAlexander Kabaev DIP_SET(ip, i_gen, ip->i_gen); 18741c85e6a3SKirk McKusick } 1875df8bae1dSRodney W. Grimes } 1876763bbd2fSRobert Watson #ifdef MAC 1877763bbd2fSRobert Watson if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) { 1878763bbd2fSRobert Watson /* 1879763bbd2fSRobert Watson * If this vnode is already allocated, and we're running 1880763bbd2fSRobert Watson * multi-label, attempt to perform a label association 1881763bbd2fSRobert Watson * from the extended attributes on the inode. 1882763bbd2fSRobert Watson */ 188330d239bcSRobert Watson error = mac_vnode_associate_extattr(mp, vp); 1884763bbd2fSRobert Watson if (error) { 1885763bbd2fSRobert Watson /* ufs_inactive will release ip->i_devvp ref. */ 18866c44a3e0SMateusz Guzik vgone(vp); 1887763bbd2fSRobert Watson vput(vp); 1888763bbd2fSRobert Watson *vpp = NULL; 1889763bbd2fSRobert Watson return (error); 1890763bbd2fSRobert Watson } 1891763bbd2fSRobert Watson } 1892763bbd2fSRobert Watson #endif 1893763bbd2fSRobert Watson 1894829f0bcbSMateusz Guzik vn_set_state(vp, VSTATE_CONSTRUCTED); 1895df8bae1dSRodney W. Grimes *vpp = vp; 1896df8bae1dSRodney W. Grimes return (0); 1897df8bae1dSRodney W. Grimes } 1898df8bae1dSRodney W. Grimes 1899df8bae1dSRodney W. Grimes /* 1900df8bae1dSRodney W. Grimes * File handle to vnode 1901df8bae1dSRodney W. Grimes * 1902df8bae1dSRodney W. Grimes * Have to be really careful about stale file handles: 1903df8bae1dSRodney W. Grimes * - check that the inode number is valid 1904abe53f7eSKirk McKusick * - for UFS2 check that the inode number is initialized 1905df8bae1dSRodney W. Grimes * - call ffs_vget() to get the locked inode 1906df8bae1dSRodney W. Grimes * - check for an unallocated inode (i_mode == 0) 1907df8bae1dSRodney W. Grimes * - check that the given client host has export rights and return 1908df8bae1dSRodney W. Grimes * those rights via. exflagsp and credanonp 1909df8bae1dSRodney W. Grimes */ 1910adf41577SPoul-Henning Kamp static int 1911064e6b43SKirk McKusick ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp) 1912df8bae1dSRodney W. Grimes { 191305f4ff5dSPoul-Henning Kamp struct ufid *ufhp; 19145952c86cSKonstantin Belousov 19155952c86cSKonstantin Belousov ufhp = (struct ufid *)fhp; 19165952c86cSKonstantin Belousov return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags, 19175952c86cSKonstantin Belousov vpp, 0)); 19185952c86cSKonstantin Belousov } 19195952c86cSKonstantin Belousov 1920886fd36eSKirk McKusick /* 1921886fd36eSKirk McKusick * Return a vnode from a mounted filesystem for inode with specified 1922886fd36eSKirk McKusick * generation number. Return ESTALE if the inode with given generation 1923886fd36eSKirk McKusick * number no longer exists on that filesystem. 
1924886fd36eSKirk McKusick */ 19255952c86cSKonstantin Belousov int 1926064e6b43SKirk McKusick ffs_inotovp(struct mount *mp, 1927064e6b43SKirk McKusick ino_t ino, 1928831b1ff7SKirk McKusick uint64_t gen, 1929064e6b43SKirk McKusick int lflags, 1930064e6b43SKirk McKusick struct vnode **vpp, 1931064e6b43SKirk McKusick int ffs_flags) 19325952c86cSKonstantin Belousov { 1933abe53f7eSKirk McKusick struct ufsmount *ump; 19345952c86cSKonstantin Belousov struct vnode *nvp; 193589fd61d9SKonstantin Belousov struct inode *ip; 1936df8bae1dSRodney W. Grimes struct fs *fs; 1937abe53f7eSKirk McKusick struct cg *cgp; 1938abe53f7eSKirk McKusick struct buf *bp; 1939831b1ff7SKirk McKusick uint64_t cg; 1940df8bae1dSRodney W. Grimes 1941abe53f7eSKirk McKusick ump = VFSTOUFS(mp); 1942abe53f7eSKirk McKusick fs = ump->um_fs; 194389fd61d9SKonstantin Belousov *vpp = NULL; 194489fd61d9SKonstantin Belousov 19451dc349abSEd Maste if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) 1946df8bae1dSRodney W. Grimes return (ESTALE); 19475952c86cSKonstantin Belousov 1948abe53f7eSKirk McKusick /* 1949abe53f7eSKirk McKusick * Need to check if inode is initialized because UFS2 does lazy 1950abe53f7eSKirk McKusick * initialization and nfs_fhtovp can offer arbitrary inode numbers. 1951abe53f7eSKirk McKusick */ 19525952c86cSKonstantin Belousov if (fs->fs_magic == FS_UFS2_MAGIC) { 1953abe53f7eSKirk McKusick cg = ino_to_cg(fs, ino); 1954886fd36eSKirk McKusick if (ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp) != 0) 1955886fd36eSKirk McKusick return (ESTALE); 19569c4f551eSKirk McKusick if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) { 1957abe53f7eSKirk McKusick brelse(bp); 1958abe53f7eSKirk McKusick return (ESTALE); 1959abe53f7eSKirk McKusick } 1960abe53f7eSKirk McKusick brelse(bp); 19615952c86cSKonstantin Belousov } 19625952c86cSKonstantin Belousov 1963886fd36eSKirk McKusick if (ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags) != 0) 1964886fd36eSKirk McKusick return (ESTALE); 196589fd61d9SKonstantin Belousov 196689fd61d9SKonstantin Belousov ip = VTOI(nvp); 196789fd61d9SKonstantin Belousov if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) { 196889fd61d9SKonstantin Belousov if (ip->i_mode == 0) 196989fd61d9SKonstantin Belousov vgone(nvp); 197089fd61d9SKonstantin Belousov vput(nvp); 197189fd61d9SKonstantin Belousov return (ESTALE); 197289fd61d9SKonstantin Belousov } 197389fd61d9SKonstantin Belousov 197489fd61d9SKonstantin Belousov vnode_create_vobject(nvp, DIP(ip, i_size), curthread); 197589fd61d9SKonstantin Belousov *vpp = nvp; 197689fd61d9SKonstantin Belousov return (0); 1977df8bae1dSRodney W. Grimes } 1978df8bae1dSRodney W. Grimes 1979df8bae1dSRodney W. Grimes /* 19805346934fSIan Dowse * Initialize the filesystem. 1981996c772fSJohn Dyson */ 1982996c772fSJohn Dyson static int 1983064e6b43SKirk McKusick ffs_init(struct vfsconf *vfsp) 1984996c772fSJohn Dyson { 1985996c772fSJohn Dyson 19861848286aSEdward Tomasz Napierala ffs_susp_initialize(); 1987b1897c19SJulian Elischer softdep_initialize(); 1988996c772fSJohn Dyson return (ufs_init(vfsp)); 1989996c772fSJohn Dyson } 1990996c772fSJohn Dyson 1991996c772fSJohn Dyson /* 19925346934fSIan Dowse * Undo the work of ffs_init(). 
19935346934fSIan Dowse */ 19945346934fSIan Dowse static int 1995064e6b43SKirk McKusick ffs_uninit(struct vfsconf *vfsp) 19965346934fSIan Dowse { 19975346934fSIan Dowse int ret; 19985346934fSIan Dowse 19995346934fSIan Dowse ret = ufs_uninit(vfsp); 20005346934fSIan Dowse softdep_uninitialize(); 20011848286aSEdward Tomasz Napierala ffs_susp_uninitialize(); 2002d79ff54bSChuck Silvers taskqueue_drain_all(taskqueue_thread); 20035346934fSIan Dowse return (ret); 20045346934fSIan Dowse } 20055346934fSIan Dowse 20065346934fSIan Dowse /* 2007dffce215SKirk McKusick * Structure used to pass information from ffs_sbupdate to its 2008dffce215SKirk McKusick * helper routine ffs_use_bwrite. 2009dffce215SKirk McKusick */ 2010dffce215SKirk McKusick struct devfd { 2011dffce215SKirk McKusick struct ufsmount *ump; 2012dffce215SKirk McKusick struct buf *sbbp; 2013dffce215SKirk McKusick int waitfor; 2014dffce215SKirk McKusick int suspended; 2015dffce215SKirk McKusick int error; 2016dffce215SKirk McKusick }; 2017dffce215SKirk McKusick 2018dffce215SKirk McKusick /* 2019df8bae1dSRodney W. Grimes * Write a superblock and associated information back to disk. 2020df8bae1dSRodney W. Grimes */ 20211a60c7fcSPawel Jakub Dawidek int 2022064e6b43SKirk McKusick ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended) 2023df8bae1dSRodney W. Grimes { 2024dffce215SKirk McKusick struct fs *fs; 20253ba649d7SJeff Roberson struct buf *sbbp; 2026dffce215SKirk McKusick struct devfd devfd; 2027df8bae1dSRodney W. Grimes 2028dffce215SKirk McKusick fs = ump->um_fs; 202974f3809aSKirk McKusick if (fs->fs_ronly == 1 && 2030927a12aeSKirk McKusick (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) != 20319acea164SRobert Wing (MNT_RDONLY | MNT_UPDATE)) 203274f3809aSKirk McKusick panic("ffs_sbupdate: write read-only filesystem"); 2033996c772fSJohn Dyson /* 20343ba649d7SJeff Roberson * We use the superblock's buf to serialize calls to ffs_sbupdate(). 2035*c2cd605eSKirk McKusick * Copy superblock to this buffer and have it written out. 20363ba649d7SJeff Roberson */ 2037927a12aeSKirk McKusick sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 2038927a12aeSKirk McKusick (int)fs->fs_sbsize, 0, 0, 0); 2039*c2cd605eSKirk McKusick UFS_LOCK(ump); 2040*c2cd605eSKirk McKusick fs->fs_fmod = 0; 2041*c2cd605eSKirk McKusick bcopy((caddr_t)fs, sbbp->b_data, (uint64_t)fs->fs_sbsize); 2042*c2cd605eSKirk McKusick UFS_UNLOCK(ump); 2043*c2cd605eSKirk McKusick fs = (struct fs *)sbbp->b_data; 20443ba649d7SJeff Roberson /* 2045dffce215SKirk McKusick * Initialize info needed for write function. 2046996c772fSJohn Dyson */ 2047dffce215SKirk McKusick devfd.ump = ump; 2048dffce215SKirk McKusick devfd.sbbp = sbbp; 2049dffce215SKirk McKusick devfd.waitfor = waitfor; 2050dffce215SKirk McKusick devfd.suspended = suspended; 2051dffce215SKirk McKusick devfd.error = 0; 2052dffce215SKirk McKusick return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite)); 2053dffce215SKirk McKusick } 2054dffce215SKirk McKusick 2055dffce215SKirk McKusick /* 2056dffce215SKirk McKusick * Write function for use by filesystem-layer routines. 
2057dffce215SKirk McKusick */
2058dffce215SKirk McKusick static int
2059dffce215SKirk McKusick ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2060dffce215SKirk McKusick {
2061dffce215SKirk McKusick struct devfd *devfdp;
2062dffce215SKirk McKusick struct ufsmount *ump;
2063dffce215SKirk McKusick struct buf *bp;
2064dffce215SKirk McKusick struct fs *fs;
2065dffce215SKirk McKusick int error;
2066dffce215SKirk McKusick 
2067dffce215SKirk McKusick devfdp = devfd;
2068dffce215SKirk McKusick ump = devfdp->ump;
2069*c2cd605eSKirk McKusick bp = devfdp->sbbp;
2070*c2cd605eSKirk McKusick fs = (struct fs *)bp->b_data;
2071dffce215SKirk McKusick /*
2072dffce215SKirk McKusick * Writing the superblock summary information.
2073dffce215SKirk McKusick */
2074dffce215SKirk McKusick if (loc != fs->fs_sblockloc) {
2075dffce215SKirk McKusick bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2076831b1ff7SKirk McKusick bcopy(buf, bp->b_data, (uint64_t)size);
2077dffce215SKirk McKusick if (devfdp->suspended)
2078791dd2faSTor Egge bp->b_flags |= B_VALIDSUSPWRT;
2079dffce215SKirk McKusick if (devfdp->waitfor != MNT_WAIT)
2080df8bae1dSRodney W. Grimes bawrite(bp);
20818aef1712SMatthew Dillon else if ((error = bwrite(bp)) != 0)
2082dffce215SKirk McKusick devfdp->error = error;
2083dffce215SKirk McKusick return (0);
2084df8bae1dSRodney W. Grimes }
2085996c772fSJohn Dyson /*
2086dffce215SKirk McKusick * Writing the superblock itself. We need to do special checks for it.
2087*c2cd605eSKirk McKusick * A negative error code is returned to indicate that a copy of the
2088*c2cd605eSKirk McKusick * superblock has been made and that the copy is discarded when the
2089*c2cd605eSKirk McKusick * I/O is done. So the caller should not attempt to restore the
2090*c2cd605eSKirk McKusick * fs_si field after the write is done. The caller will convert the
2091*c2cd605eSKirk McKusick * error code back to its usual positive value when returning it.
2092996c772fSJohn Dyson */
2093d79ff54bSChuck Silvers if (ffs_fsfail_cleanup(ump, devfdp->error))
2094d79ff54bSChuck Silvers devfdp->error = 0;
2095dffce215SKirk McKusick if (devfdp->error != 0) {
2096dffce215SKirk McKusick brelse(bp);
2097*c2cd605eSKirk McKusick return (-devfdp->error - 1);
2098fa5d33e2SKirk McKusick }
2099519e3c3bSKirk McKusick if (MOUNTEDSOFTDEP(ump->um_mountp))
2100*c2cd605eSKirk McKusick softdep_setup_sbupdate(ump, fs, bp);
2101dffce215SKirk McKusick if (devfdp->suspended)
2102791dd2faSTor Egge bp->b_flags |= B_VALIDSUSPWRT;
2103dffce215SKirk McKusick if (devfdp->waitfor != MNT_WAIT)
2104996c772fSJohn Dyson bawrite(bp);
21058aef1712SMatthew Dillon else if ((error = bwrite(bp)) != 0)
2106dffce215SKirk McKusick devfdp->error = error;
2107*c2cd605eSKirk McKusick return (-devfdp->error - 1);
2108df8bae1dSRodney W.
Grimes } 2109d6fe88e4SPoul-Henning Kamp 2110d6fe88e4SPoul-Henning Kamp static int 2111d6fe88e4SPoul-Henning Kamp ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, 2112dfd233edSAttilio Rao int attrnamespace, const char *attrname) 2113d6fe88e4SPoul-Henning Kamp { 2114d6fe88e4SPoul-Henning Kamp 2115d6fe88e4SPoul-Henning Kamp #ifdef UFS_EXTATTR 2116d6fe88e4SPoul-Henning Kamp return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace, 2117dfd233edSAttilio Rao attrname)); 2118d6fe88e4SPoul-Henning Kamp #else 2119d6fe88e4SPoul-Henning Kamp return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, 2120dfd233edSAttilio Rao attrname)); 2121d6fe88e4SPoul-Henning Kamp #endif 2122d6fe88e4SPoul-Henning Kamp } 2123975512a9SPoul-Henning Kamp 2124975512a9SPoul-Henning Kamp static void 2125975512a9SPoul-Henning Kamp ffs_ifree(struct ufsmount *ump, struct inode *ip) 2126975512a9SPoul-Henning Kamp { 2127975512a9SPoul-Henning Kamp 212836329289STim J. Robbins if (ump->um_fstype == UFS1 && ip->i_din1 != NULL) 2129aa4d7a8aSPoul-Henning Kamp uma_zfree(uma_ufs1, ip->i_din1); 213036329289STim J. Robbins else if (ip->i_din2 != NULL) 21318d721e87STim J. Robbins uma_zfree(uma_ufs2, ip->i_din2); 21329d5a594fSMateusz Guzik uma_zfree_smr(uma_inode, ip); 2133975512a9SPoul-Henning Kamp } 21346e77a041SPoul-Henning Kamp 2135dd19a799SPoul-Henning Kamp static int dobkgrdwrite = 1; 2136dd19a799SPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0, 2137dd19a799SPoul-Henning Kamp "Do background writes (honoring the BV_BKGRDWRITE flag)?"); 2138dd19a799SPoul-Henning Kamp 2139dd19a799SPoul-Henning Kamp /* 2140dd19a799SPoul-Henning Kamp * Complete a background write started from bwrite. 2141dd19a799SPoul-Henning Kamp */ 2142dd19a799SPoul-Henning Kamp static void 2143dd19a799SPoul-Henning Kamp ffs_backgroundwritedone(struct buf *bp) 2144dd19a799SPoul-Henning Kamp { 2145204ec66dSJeff Roberson struct bufobj *bufobj; 2146dd19a799SPoul-Henning Kamp struct buf *origbp; 2147dd19a799SPoul-Henning Kamp 2148d79ff54bSChuck Silvers #ifdef SOFTUPDATES 2149d79ff54bSChuck Silvers if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0) 2150d79ff54bSChuck Silvers softdep_handle_error(bp); 2151d79ff54bSChuck Silvers #endif 2152d79ff54bSChuck Silvers 2153dd19a799SPoul-Henning Kamp /* 2154dd19a799SPoul-Henning Kamp * Find the original buffer that we are writing. 2155dd19a799SPoul-Henning Kamp */ 2156204ec66dSJeff Roberson bufobj = bp->b_bufobj; 2157204ec66dSJeff Roberson BO_LOCK(bufobj); 2158dd19a799SPoul-Henning Kamp if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL) 2159dd19a799SPoul-Henning Kamp panic("backgroundwritedone: lost buffer"); 2160b2c3df84SKonstantin Belousov 2161b2c3df84SKonstantin Belousov /* 2162b2c3df84SKonstantin Belousov * We should mark the cylinder group buffer origbp as 2163d79ff54bSChuck Silvers * dirty, to not lose the failed write. 2164b2c3df84SKonstantin Belousov */ 2165b2c3df84SKonstantin Belousov if ((bp->b_ioflags & BIO_ERROR) != 0) 2166b2c3df84SKonstantin Belousov origbp->b_vflags |= BV_BKGRDERR; 2167204ec66dSJeff Roberson BO_UNLOCK(bufobj); 2168dd19a799SPoul-Henning Kamp /* 2169dd19a799SPoul-Henning Kamp * Process dependencies then return any unfinished ones. 
2170dd19a799SPoul-Henning Kamp */ 2171b2c3df84SKonstantin Belousov if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0) 2172dd19a799SPoul-Henning Kamp buf_complete(bp); 2173dd19a799SPoul-Henning Kamp #ifdef SOFTUPDATES 217404533fc6SXin LI if (!LIST_EMPTY(&bp->b_dep)) 2175dd19a799SPoul-Henning Kamp softdep_move_dependencies(bp, origbp); 2176dd19a799SPoul-Henning Kamp #endif 2177dd19a799SPoul-Henning Kamp /* 2178204ec66dSJeff Roberson * This buffer is marked B_NOCACHE so when it is released 2179bf0db193SKonstantin Belousov * by biodone it will be tossed. Clear B_IOSTARTED in case of error. 2180dd19a799SPoul-Henning Kamp */ 2181dd19a799SPoul-Henning Kamp bp->b_flags |= B_NOCACHE; 2182bf0db193SKonstantin Belousov bp->b_flags &= ~(B_CACHE | B_IOSTARTED); 2183377f88fbSKonstantin Belousov pbrelvp(bp); 2184b2c3df84SKonstantin Belousov 2185b2c3df84SKonstantin Belousov /* 2186b2c3df84SKonstantin Belousov * Prevent brelse() from trying to keep and re-dirtying bp on 2187b2c3df84SKonstantin Belousov * errors. It causes b_bufobj dereference in 2188b2c3df84SKonstantin Belousov * bdirty()/reassignbuf(), and b_bufobj was cleared in 2189b2c3df84SKonstantin Belousov * pbrelvp() above. 2190b2c3df84SKonstantin Belousov */ 2191b2c3df84SKonstantin Belousov if ((bp->b_ioflags & BIO_ERROR) != 0) 2192b2c3df84SKonstantin Belousov bp->b_flags |= B_INVAL; 2193dd19a799SPoul-Henning Kamp bufdone(bp); 2194204ec66dSJeff Roberson BO_LOCK(bufobj); 2195dd19a799SPoul-Henning Kamp /* 2196dd19a799SPoul-Henning Kamp * Clear the BV_BKGRDINPROG flag in the original buffer 2197dd19a799SPoul-Henning Kamp * and awaken it if it is waiting for the write to complete. 2198dd19a799SPoul-Henning Kamp * If BV_BKGRDINPROG is not set in the original buffer it must 2199dd19a799SPoul-Henning Kamp * have been released and re-instantiated - which is not legal. 2200dd19a799SPoul-Henning Kamp */ 2201dd19a799SPoul-Henning Kamp KASSERT((origbp->b_vflags & BV_BKGRDINPROG), 2202dd19a799SPoul-Henning Kamp ("backgroundwritedone: lost buffer2")); 2203dd19a799SPoul-Henning Kamp origbp->b_vflags &= ~BV_BKGRDINPROG; 2204dd19a799SPoul-Henning Kamp if (origbp->b_vflags & BV_BKGRDWAIT) { 2205dd19a799SPoul-Henning Kamp origbp->b_vflags &= ~BV_BKGRDWAIT; 2206dd19a799SPoul-Henning Kamp wakeup(&origbp->b_xflags); 2207dd19a799SPoul-Henning Kamp } 2208204ec66dSJeff Roberson BO_UNLOCK(bufobj); 2209dd19a799SPoul-Henning Kamp } 2210dd19a799SPoul-Henning Kamp 2211dd19a799SPoul-Henning Kamp /* 2212dd19a799SPoul-Henning Kamp * Write, release buffer on completion. (Done by iodone 2213dd19a799SPoul-Henning Kamp * if async). Do not bother writing anything if the buffer 2214dd19a799SPoul-Henning Kamp * is invalid. 2215dd19a799SPoul-Henning Kamp * 2216dd19a799SPoul-Henning Kamp * Note that we set B_CACHE here, indicating that buffer is 2217dd19a799SPoul-Henning Kamp * fully valid and thus cacheable. This is true even of NFS 2218dd19a799SPoul-Henning Kamp * now so we set it generally. This could be set either here 2219dd19a799SPoul-Henning Kamp * or in biodone() since the I/O is synchronous. We put it 2220dd19a799SPoul-Henning Kamp * here. 
2221dd19a799SPoul-Henning Kamp */ 2222dd19a799SPoul-Henning Kamp static int 2223dd19a799SPoul-Henning Kamp ffs_bufwrite(struct buf *bp) 2224dd19a799SPoul-Henning Kamp { 2225dd19a799SPoul-Henning Kamp struct buf *newbp; 222647806d1bSKirk McKusick struct cg *cgp; 2227dd19a799SPoul-Henning Kamp 2228dd19a799SPoul-Henning Kamp CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2229dd19a799SPoul-Henning Kamp if (bp->b_flags & B_INVAL) { 2230dd19a799SPoul-Henning Kamp brelse(bp); 2231dd19a799SPoul-Henning Kamp return (0); 2232dd19a799SPoul-Henning Kamp } 2233dd19a799SPoul-Henning Kamp 2234d638e093SAttilio Rao if (!BUF_ISLOCKED(bp)) 2235dd19a799SPoul-Henning Kamp panic("bufwrite: buffer is not busy???"); 2236dd19a799SPoul-Henning Kamp /* 2237dd19a799SPoul-Henning Kamp * If a background write is already in progress, delay 2238dd19a799SPoul-Henning Kamp * writing this block if it is asynchronous. Otherwise 2239dd19a799SPoul-Henning Kamp * wait for the background write to complete. 2240dd19a799SPoul-Henning Kamp */ 2241dd19a799SPoul-Henning Kamp BO_LOCK(bp->b_bufobj); 2242dd19a799SPoul-Henning Kamp if (bp->b_vflags & BV_BKGRDINPROG) { 2243dd19a799SPoul-Henning Kamp if (bp->b_flags & B_ASYNC) { 2244dd19a799SPoul-Henning Kamp BO_UNLOCK(bp->b_bufobj); 2245dd19a799SPoul-Henning Kamp bdwrite(bp); 2246dd19a799SPoul-Henning Kamp return (0); 2247dd19a799SPoul-Henning Kamp } 2248dd19a799SPoul-Henning Kamp bp->b_vflags |= BV_BKGRDWAIT; 224922a72260SJeff Roberson msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO, 225022a72260SJeff Roberson "bwrbg", 0); 2251dd19a799SPoul-Henning Kamp if (bp->b_vflags & BV_BKGRDINPROG) 2252dd19a799SPoul-Henning Kamp panic("bufwrite: still writing"); 2253dd19a799SPoul-Henning Kamp } 2254b2c3df84SKonstantin Belousov bp->b_vflags &= ~BV_BKGRDERR; 2255dd19a799SPoul-Henning Kamp BO_UNLOCK(bp->b_bufobj); 2256dd19a799SPoul-Henning Kamp 2257dd19a799SPoul-Henning Kamp /* 2258dd19a799SPoul-Henning Kamp * If this buffer is marked for background writing and we 2259dd19a799SPoul-Henning Kamp * do not have to wait for it, make a copy and write the 2260dd19a799SPoul-Henning Kamp * copy so as to leave this buffer ready for further use. 2261dd19a799SPoul-Henning Kamp * 2262dd19a799SPoul-Henning Kamp * This optimization eats a lot of memory. If we have a page 2263dd19a799SPoul-Henning Kamp * or buffer shortfall we can't do it. 
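	 * Background writes can also be disabled administratively with
	 * the sysctl declared above, e.g. "sysctl debug.dobkgrdwrite=0";
	 * in that case, or under severe page/dirty-buffer shortage, we
	 * fall through to an ordinary foreground write below.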
2264dd19a799SPoul-Henning Kamp */ 2265dd19a799SPoul-Henning Kamp if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && 2266dd19a799SPoul-Henning Kamp (bp->b_flags & B_ASYNC) && 2267dd19a799SPoul-Henning Kamp !vm_page_count_severe() && 2268dd19a799SPoul-Henning Kamp !buf_dirty_count_severe()) { 2269dd19a799SPoul-Henning Kamp KASSERT(bp->b_iodone == NULL, 2270dd19a799SPoul-Henning Kamp ("bufwrite: needs chained iodone (%p)", bp->b_iodone)); 2271dd19a799SPoul-Henning Kamp 2272dd19a799SPoul-Henning Kamp /* get a new block */ 2273c1d8b5e8SKonstantin Belousov newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD); 2274c1d8b5e8SKonstantin Belousov if (newbp == NULL) 2275c1d8b5e8SKonstantin Belousov goto normal_write; 2276dd19a799SPoul-Henning Kamp 2277fade8dd7SJeff Roberson KASSERT(buf_mapped(bp), ("Unmapped cg")); 2278dd19a799SPoul-Henning Kamp memcpy(newbp->b_data, bp->b_data, bp->b_bufsize); 2279dd19a799SPoul-Henning Kamp BO_LOCK(bp->b_bufobj); 2280dd19a799SPoul-Henning Kamp bp->b_vflags |= BV_BKGRDINPROG; 2281dd19a799SPoul-Henning Kamp BO_UNLOCK(bp->b_bufobj); 228275e3597aSKirk McKusick newbp->b_xflags |= 228375e3597aSKirk McKusick (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER; 228426089666SJeff Roberson newbp->b_lblkno = bp->b_lblkno; 2285dd19a799SPoul-Henning Kamp newbp->b_blkno = bp->b_blkno; 2286dd19a799SPoul-Henning Kamp newbp->b_offset = bp->b_offset; 2287dd19a799SPoul-Henning Kamp newbp->b_iodone = ffs_backgroundwritedone; 2288dd19a799SPoul-Henning Kamp newbp->b_flags |= B_ASYNC; 2289dd19a799SPoul-Henning Kamp newbp->b_flags &= ~B_INVAL; 229026089666SJeff Roberson pbgetvp(bp->b_vp, newbp); 2291dd19a799SPoul-Henning Kamp 2292dd19a799SPoul-Henning Kamp #ifdef SOFTUPDATES 2293113db2ddSJeff Roberson /* 2294113db2ddSJeff Roberson * Move over the dependencies. If there are rollbacks, 2295113db2ddSJeff Roberson * leave the parent buffer dirtied as it will need to 2296113db2ddSJeff Roberson * be written again. 2297113db2ddSJeff Roberson */ 2298113db2ddSJeff Roberson if (LIST_EMPTY(&bp->b_dep) || 2299113db2ddSJeff Roberson softdep_move_dependencies(bp, newbp) == 0) 2300113db2ddSJeff Roberson bundirty(bp); 2301113db2ddSJeff Roberson #else 2302113db2ddSJeff Roberson bundirty(bp); 2303dd19a799SPoul-Henning Kamp #endif 2304dd19a799SPoul-Henning Kamp 2305dd19a799SPoul-Henning Kamp /* 230626089666SJeff Roberson * Initiate write on the copy, release the original. The 230726089666SJeff Roberson * BKGRDINPROG flag prevents it from going away until 230847806d1bSKirk McKusick * the background write completes. We have to recalculate 230947806d1bSKirk McKusick * its check hash in case the buffer gets freed and then 231047806d1bSKirk McKusick * reconstituted from the buffer cache during a later read. 
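	 * The zero-then-hash sequence below matches what a verifier is
	 * expected to do when the group is read back: the stored
	 * cg_ckhash covers the cylinder group with the hash field itself
	 * treated as zero.  In sketch (fs_cgsize is illustrative; this
	 * write path hashes b_bcount bytes of the buffer):
	 *
	 *	ckhash = cgp->cg_ckhash;
	 *	cgp->cg_ckhash = 0;
	 *	valid = (ckhash ==
	 *	    calculate_crc32c(~0L, (void *)cgp, fs->fs_cgsize));
	 *	cgp->cg_ckhash = ckhash;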
2311dd19a799SPoul-Henning Kamp */ 231247806d1bSKirk McKusick if ((bp->b_xflags & BX_CYLGRP) != 0) { 231347806d1bSKirk McKusick cgp = (struct cg *)bp->b_data; 231447806d1bSKirk McKusick cgp->cg_ckhash = 0; 231547806d1bSKirk McKusick cgp->cg_ckhash = 231647806d1bSKirk McKusick calculate_crc32c(~0L, bp->b_data, bp->b_bcount); 231747806d1bSKirk McKusick } 2318dd19a799SPoul-Henning Kamp bqrelse(bp); 2319dd19a799SPoul-Henning Kamp bp = newbp; 2320113db2ddSJeff Roberson } else 2321113db2ddSJeff Roberson /* Mark the buffer clean */ 2322113db2ddSJeff Roberson bundirty(bp); 2323113db2ddSJeff Roberson 2324dd19a799SPoul-Henning Kamp /* Let the normal bufwrite do the rest for us */ 2325c1d8b5e8SKonstantin Belousov normal_write: 232647806d1bSKirk McKusick /* 232747806d1bSKirk McKusick * If we are writing a cylinder group, update its time. 232847806d1bSKirk McKusick */ 232947806d1bSKirk McKusick if ((bp->b_xflags & BX_CYLGRP) != 0) { 233047806d1bSKirk McKusick cgp = (struct cg *)bp->b_data; 233147806d1bSKirk McKusick cgp->cg_old_time = cgp->cg_time = time_second; 233247806d1bSKirk McKusick } 23339248a827STor Egge return (bufwrite(bp)); 2334dd19a799SPoul-Henning Kamp } 2335dd19a799SPoul-Henning Kamp 23368dd56505SPoul-Henning Kamp static void 23376e77a041SPoul-Henning Kamp ffs_geom_strategy(struct bufobj *bo, struct buf *bp) 23386e77a041SPoul-Henning Kamp { 2339153910e0SJeff Roberson struct vnode *vp; 23407de3839dSTor Egge struct buf *tbp; 234175e3597aSKirk McKusick int error, nocopy; 23426e77a041SPoul-Henning Kamp 2343f15ccf88SChuck Silvers /* 2344f15ccf88SChuck Silvers * This is the bufobj strategy for the private VCHR vnodes 2345f15ccf88SChuck Silvers * used by FFS to access the underlying storage device. 2346f15ccf88SChuck Silvers * We override the default bufobj strategy and thus bypass 2347f15ccf88SChuck Silvers * VOP_STRATEGY() for these vnodes. 
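	 * For writes, the work below falls into three steps: redirect
	 * any blocks claimed by active snapshots through
	 * ffs_copyonwrite(), start soft updates dependencies with
	 * buf_start(), and refresh check hashes on protected metadata,
	 * after which the buffer is handed to GEOM via g_vfs_strategy().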
2348f15ccf88SChuck Silvers */ 23498660b707SMateusz Guzik vp = bo2vnode(bo); 2350f15ccf88SChuck Silvers KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR || 2351f15ccf88SChuck Silvers bp->b_vp->v_rdev == NULL || 2352f15ccf88SChuck Silvers bp->b_vp->v_rdev->si_mountpt == NULL || 2353f15ccf88SChuck Silvers VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL || 2354f15ccf88SChuck Silvers vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp, 2355f15ccf88SChuck Silvers ("ffs_geom_strategy() with wrong vp")); 2356153910e0SJeff Roberson if (bp->b_iocmd == BIO_WRITE) { 2357153910e0SJeff Roberson if ((bp->b_flags & B_VALIDSUSPWRT) == 0 && 2358153910e0SJeff Roberson bp->b_vp != NULL && bp->b_vp->v_mount != NULL && 2359153910e0SJeff Roberson (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0) 2360153910e0SJeff Roberson panic("ffs_geom_strategy: bad I/O"); 2361113db2ddSJeff Roberson nocopy = bp->b_flags & B_NOCOPY; 2362113db2ddSJeff Roberson bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY); 2363113db2ddSJeff Roberson if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 && 23647de3839dSTor Egge vp->v_rdev->si_snapdata != NULL) { 23657de3839dSTor Egge if ((bp->b_flags & B_CLUSTER) != 0) { 2366868bb88fSTor Egge runningbufwakeup(bp); 23677de3839dSTor Egge TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, 23687de3839dSTor Egge b_cluster.cluster_entry) { 23697de3839dSTor Egge error = ffs_copyonwrite(vp, tbp); 23707de3839dSTor Egge if (error != 0 && 2371153910e0SJeff Roberson error != EOPNOTSUPP) { 2372153910e0SJeff Roberson bp->b_error = error; 2373153910e0SJeff Roberson bp->b_ioflags |= BIO_ERROR; 2374e1ef4c29SKonstantin Belousov bp->b_flags &= ~B_BARRIER; 2375153910e0SJeff Roberson bufdone(bp); 2376153910e0SJeff Roberson return; 2377153910e0SJeff Roberson } 2378153910e0SJeff Roberson } 23794efe531cSMark Johnston (void)runningbufclaim(bp, bp->b_bufsize); 23807de3839dSTor Egge } else { 23817de3839dSTor Egge error = ffs_copyonwrite(vp, bp); 23827de3839dSTor Egge if (error != 0 && error != EOPNOTSUPP) { 23837de3839dSTor Egge bp->b_error = error; 23847de3839dSTor Egge bp->b_ioflags |= BIO_ERROR; 2385e1ef4c29SKonstantin Belousov bp->b_flags &= ~B_BARRIER; 23867de3839dSTor Egge bufdone(bp); 23877de3839dSTor Egge return; 23887de3839dSTor Egge } 23897de3839dSTor Egge } 23907de3839dSTor Egge } 23917de3839dSTor Egge #ifdef SOFTUPDATES 23927de3839dSTor Egge if ((bp->b_flags & B_CLUSTER) != 0) { 23937de3839dSTor Egge TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, 23947de3839dSTor Egge b_cluster.cluster_entry) { 239504533fc6SXin LI if (!LIST_EMPTY(&tbp->b_dep)) 23967de3839dSTor Egge buf_start(tbp); 23977de3839dSTor Egge } 23987de3839dSTor Egge } else { 239904533fc6SXin LI if (!LIST_EMPTY(&bp->b_dep)) 24007de3839dSTor Egge buf_start(bp); 24017de3839dSTor Egge } 24027de3839dSTor Egge 24037de3839dSTor Egge #endif 240475e3597aSKirk McKusick /* 240575e3597aSKirk McKusick * Check for metadata that needs check-hashes and update them. 
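		 * Only cylinder group buffers (BX_CYLGRP) have their
		 * CRC32c recomputed here; the superblock, inode, indirect
		 * and directory classes are recognized but their
		 * write-side check hashes are not generated yet, hence
		 * the printf in the switch below.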
240675e3597aSKirk McKusick */ 240775e3597aSKirk McKusick switch (bp->b_xflags & BX_FSPRIV) { 240875e3597aSKirk McKusick case BX_CYLGRP: 240975e3597aSKirk McKusick ((struct cg *)bp->b_data)->cg_ckhash = 0; 241075e3597aSKirk McKusick ((struct cg *)bp->b_data)->cg_ckhash = 241175e3597aSKirk McKusick calculate_crc32c(~0L, bp->b_data, bp->b_bcount); 241275e3597aSKirk McKusick break; 241375e3597aSKirk McKusick 241475e3597aSKirk McKusick case BX_SUPERBLOCK: 241575e3597aSKirk McKusick case BX_INODE: 241675e3597aSKirk McKusick case BX_INDIR: 241775e3597aSKirk McKusick case BX_DIR: 241875e3597aSKirk McKusick printf("Check-hash write is unimplemented!!!\n"); 241975e3597aSKirk McKusick break; 242075e3597aSKirk McKusick 242175e3597aSKirk McKusick case 0: 242275e3597aSKirk McKusick break; 242375e3597aSKirk McKusick 242475e3597aSKirk McKusick default: 242575e3597aSKirk McKusick printf("multiple buffer types 0x%b\n", 2426831b1ff7SKirk McKusick (bp->b_xflags & BX_FSPRIV), PRINT_UFS_BUF_XFLAGS); 242775e3597aSKirk McKusick break; 242875e3597aSKirk McKusick } 24297de3839dSTor Egge } 2430d79ff54bSChuck Silvers if (bp->b_iocmd != BIO_READ && ffs_enxio_enable) 2431d79ff54bSChuck Silvers bp->b_xflags |= BX_CVTENXIO; 243243920011SPoul-Henning Kamp g_vfs_strategy(bo, bp); 24336e77a041SPoul-Henning Kamp } 243452dfc8d7SKonstantin Belousov 24351848286aSEdward Tomasz Napierala int 24361848286aSEdward Tomasz Napierala ffs_own_mount(const struct mount *mp) 24371848286aSEdward Tomasz Napierala { 24381848286aSEdward Tomasz Napierala 24391848286aSEdward Tomasz Napierala if (mp->mnt_op == &ufs_vfsops) 24401848286aSEdward Tomasz Napierala return (1); 24411848286aSEdward Tomasz Napierala return (0); 24421848286aSEdward Tomasz Napierala } 24431848286aSEdward Tomasz Napierala 244452dfc8d7SKonstantin Belousov #ifdef DDB 2445cf058082SBrooks Davis #ifdef SOFTUPDATES 244652dfc8d7SKonstantin Belousov 2447519e3c3bSKirk McKusick /* defined in ffs_softdep.c */ 2448519e3c3bSKirk McKusick extern void db_print_ffs(struct ufsmount *ump); 244952dfc8d7SKonstantin Belousov 245052dfc8d7SKonstantin Belousov DB_SHOW_COMMAND(ffs, db_show_ffs) 245152dfc8d7SKonstantin Belousov { 245252dfc8d7SKonstantin Belousov struct mount *mp; 245352dfc8d7SKonstantin Belousov struct ufsmount *ump; 245452dfc8d7SKonstantin Belousov 245552dfc8d7SKonstantin Belousov if (have_addr) { 245652dfc8d7SKonstantin Belousov ump = VFSTOUFS((struct mount *)addr); 245752dfc8d7SKonstantin Belousov db_print_ffs(ump); 245852dfc8d7SKonstantin Belousov return; 245952dfc8d7SKonstantin Belousov } 246052dfc8d7SKonstantin Belousov 246152dfc8d7SKonstantin Belousov TAILQ_FOREACH(mp, &mountlist, mnt_list) { 246252dfc8d7SKonstantin Belousov if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name)) 246352dfc8d7SKonstantin Belousov db_print_ffs(VFSTOUFS(mp)); 246452dfc8d7SKonstantin Belousov } 246552dfc8d7SKonstantin Belousov } 246652dfc8d7SKonstantin Belousov 2467cf058082SBrooks Davis #endif /* SOFTUPDATES */ 246852dfc8d7SKonstantin Belousov #endif /* DDB */ 2469
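
/*
 * Usage note for the DDB command above (a sketch; it is available only
 * in kernels built with both DDB and SOFTUPDATES):
 *
 *	db> show ffs			print every mounted UFS/FFS file system
 *	db> show ffs <struct mount *>	print a single mount
 *
 * Both forms produce their output through db_print_ffs() in
 * ffs_softdep.c.
 */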