/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

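/*
 * Declared CTLFLAG_RWTUN, so this knob can be preset as a loader(8)
 * tunable in loader.conf as well as changed at runtime, e.g.:
 *	sysctl vfs.ffs.enxio_enable=0
 */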
static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-NULL, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block. Also check
	 * that it does not point to an inode block or a superblock. Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
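	/*
	 * The unsigned comparison below folds the three acceptable small
	 * values into a single test: 0 (unallocated), BLK_NOCOPY, and
	 * BLK_SNAP are the lowest block numbers, and the uint64_t cast
	 * makes any negative daddr compare as a huge value instead.
	 */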
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
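	/*
	 * Rate-limit the integrity warning and drop the UFS mutex around
	 * uprintf(), which may sleep; retake it afterwards only if the
	 * caller came in owning it.
	 */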
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}

/*
 * On first ENXIO error, initiate an asynchronous forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		if (ump->um_mountp == rootvnode->v_mount)
			panic("UFS: root fs would be forcibly unmounted");

		/*
		 * Queue an async forced unmount.
		 */
		vfs_ref(ump->um_mountp);
		dounmount(ump->um_mountp,
		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
		printf("UFS: forcibly unmounting %s from %s\n",
		    ump->um_mountp->mnt_stat.f_mntfromname,
		    ump->um_mountp->mnt_stat.f_mntonname);
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one. They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

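	/*
	 * GB_CVTENXIO asks the buffer layer to report a hard I/O error
	 * on this buffer as ENXIO, which ffs_fsfail_cleanup() recognizes;
	 * in that case a zero-filled buffer is substituted so that soft
	 * updates can unwind its dependencies.
	 */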
	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error, flags;
	int error1 __diagused;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);

	/*
	 * If this is a snapshot request, take the snapshot.
	 */
	if (mp->mnt_flag & MNT_SNAPSHOT) {
		if ((mp->mnt_flag & MNT_UPDATE) == 0)
			return (EINVAL);
		return (ffs_snapshot(mp, fspec));
	}

	/*
	 * Must not call namei() while owning busy ref.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		vfs_unbusy(mp);

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE_PNBUF(&ndp);
	if (!vn_isdisk_error(ndp.ni_vp, &error)) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * New mount
	 *
	 * We need the name for the mount point (also used for
	 * "last mounted on") copied in. If an error occurs,
	 * the mount point is discarded by the upper level code.
	 * Note that vfs_mount_alloc() populates f_mntonname for us.
	 */
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
			vrele(ndp.ni_vp);
			return (error);
		}
	} else {
		/*
		 * When updating, check whether changing from read-only to
		 * read/write; if there is no device name, that's all we do.
		 */
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;

		/*
		 * If it's not the same vnode, or at least the same device,
		 * then it's not correct.
		 */
		if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(ndp.ni_vp);
		if (error)
			return (error);
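		/*
		 * Downgrade from read-write to read-only: suspend the
		 * filesystem, flush (or with MNT_FORCE, force-close) files
		 * open for writing, mark the superblock clean, write it
		 * out, and drop the GEOM write and exclusive access counts.
		 */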
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, 0)) != 0)
			return (error);
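		/*
		 * Upgrade from read-only to read-write: re-check device
		 * permissions and filesystem cleanliness, take GEOM write
		 * and exclusive access, suspend, restart soft updates if
		 * configured, and write out the now-dirty superblock.
		 */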
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					   "dismounted\n",
					   mp->mnt_stat.f_mntonname);
				} else {
					vfs_mount_error(mp,
					   "R/W mount of %s denied. %s.%s",
					   mp->mnt_stat.f_mntonname,
					   "Filesystem is not clean - run fsck",
					   (fs->fs_flags & FS_SUJ) == 0 ? "" :
					   " Forced mount will invalidate"
					   " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))) {
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates are incompatible with "async", so if we are
		 * doing soft updates, stop the user from setting the async
		 * flag in an update.  softdep_mount() clears it in an
		 * initial mount or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */
static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	uint64_t size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EINTEGRITY);
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (uint64_t)fs->fs_sbsize);
	brelse(bp);
	ump->um_bsize = fs->fs_bsize;
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", mp->mnt_stat.f_mntonname,
		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(uint8_t);
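	/*
	 * The csum array, the optional cluster summary, and the
	 * contigdirs array share this single allocation; "space" is
	 * walked through it as each piece is filled in below.
	 */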
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (uint64_t)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(uint8_t);
	fs->fs_contigdirs = (uint8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 6: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 7: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	int candelete, canspeedup;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
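	/*
	 * Claim the device for this mount by atomically installing mp in
	 * si_mountpt; if another mount got there first, back out.
	 */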
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	VOP_UNLOCK(devvp);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
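	/*
	 * For the root filesystem, or when MNT_FORCE is given, search the
	 * known superblock locations so a damaged standard superblock can
	 * be recovered from an alternate; otherwise insist on a valid
	 * superblock at the standard location.
	 */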
	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
		error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
	else
		error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
		    ffs_use_bread);
	if (error != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    mp->mnt_stat.f_mntonname);
		} else {
			vfs_mount_error(mp, "R/W mount on %s denied. "
			    "Filesystem is not clean - run fsck.%s",
			    mp->mnt_stat.f_mntonname,
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    mp->mnt_stat.f_mntonname,
			    (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", mp->mnt_stat.f_mntonname,
		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((uint64_t)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			if ((mp->mnt_flag & MNT_RDONLY) == 0)
				printf("WARNING: %s: GJOURNAL flag on fs "
				    "but no gjournal provider below\n",
				    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
		ump->um_check_blkno = ffs_check_blkno;
	else
		ump->um_check_blkno = NULL;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	sx_init(&ump->um_checkpath_lock, "uchpth");
	ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	ump->um_bsize = fs->fs_bsize;
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
		if ((ump->um_flags & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_odevvp = odevvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 * 	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs->fs_si, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump != NULL) {
		mtx_destroy(UFS_MTX(ump));
		sx_destroy(&ump->um_checkpath_lock);
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		MPASS(ump->um_softdep == NULL);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	mntfs_freevp(devvp);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(struct fs *fs,
	struct ufsmount *ump,
	ufs2_daddr_t sblockloc)
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
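		/*
		 * Clamp fs_maxfilesize to at most 2^31 filesystem blocks,
		 * presumably the reach of UFS1's signed 32-bit block
		 * numbers; the on-disk value is saved above so that
		 * ffs_oldfscompat_write() can restore it.
		 */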
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(struct mount *mp, int mntflags)
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	susp = fs->fs_ronly == 0;
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	MPASS(ump->um_softdep == NULL);
	if (fs->fs_ronly == 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		MPASS(ump->um_trim_inflight == 0);
		taskqueue_free(ump->um_trim_tq);
		free(ump->um_trimhash, M_TRIM);
	}
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	g_topology_lock();
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	BO_LOCK(&ump->um_odevvp->v_bufobj);
	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	mntfs_freevp(ump->um_devvp);
	vrele(ump->um_odevvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	sx_destroy(&ump->um_checkpath_lock);
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	/* devvp is not locked here */
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * If this is a forcible unmount and there were any files that
	 * were unlinked but still open, then vflush() will have
	 * truncated and freed those files, which might have started
	 * some trim work.  Wait here for any trims to complete
	 * and process the blkfrees which follow the trims.
	 * This may create more dirty devvp buffers and softdep deps.
	 */
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
	}

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}

static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

static int
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
{
	struct inode *ip;

	/*
	 * Flags are safe to access because ->v_data invalidation
	 * is held off by listmtx.
	 */
	if (vp->v_type == VNON)
		return (false);
	ip = VTOI(vp);
	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
		return (false);
	return (true);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(struct mount *mp)
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	int allerror, error;

	allerror = 0;
	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
		qsync(mp);
#endif
		goto sbupdate;
	}
	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
			continue;
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
sbupdate:
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(struct mount *mp, int waitfor)
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
#ifdef QUOTA
		qsyncvp(vp);
#endif
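		/*
		 * ffs_syncvnode() may return ERELOOKUP when it had to
		 * restart; simply retry until it returns a definitive
		 * answer.
		 */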
		for (;;) {
			error = ffs_syncvnode(vp, waitfor, 0);
			if (error == ERELOOKUP)
				continue;
			if (error != 0)
				allerror = error;
			break;
		}
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp, devvp, softdep_deps,
		    softdep_accdeps, secondary_writes,
		    secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	if (ffs_fsfail_cleanup(ump, allerror))
		allerror = 0;
	return (allerror);
}
1819 
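/*
 * Obtain the vnode for the given inode number.  This is the VFS_VGET()
 * entry point, a thin wrapper around ffs_vgetf() that passes no
 * FFS-specific flags.
 */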
1820 int
1821 ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
1822 {
1823 	return (ffs_vgetf(mp, ino, flags, vpp, 0));
1824 }
1825 
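/*
 * Extended version of ffs_vget() taking FFS-specific flags:
 * FFSV_REPLACE (or FFSV_REPLACE_DOOMED, for doomed vnodes only)
 * discards an existing vnode for the inode instead of reusing it,
 * FFSV_FORCEINSMQ sets VV_FORCEINSMQ across the insmntque() call,
 * FFSV_NEWINODE skips the disk read for a freshly allocated inode,
 * and FFSV_FORCEINODEDEP loads softdep state even on a read-only
 * filesystem.  A typical lookup might be, for example:
 *
 *	if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 0)) != 0)
 *		return (error);
 */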
1826 int
1827 ffs_vgetf(struct mount *mp,
1828 	ino_t ino,
1829 	int flags,
1830 	struct vnode **vpp,
1831 	int ffs_flags)
1832 {
1833 	struct fs *fs;
1834 	struct inode *ip;
1835 	struct ufsmount *ump;
1836 	struct buf *bp;
1837 	struct vnode *vp;
1838 	daddr_t dbn;
1839 	int error;
1840 
1841 	MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1842 	    (flags & LK_EXCLUSIVE) != 0);
1843 
1844 	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1845 	if (error != 0)
1846 		return (error);
1847 	if (*vpp != NULL) {
1848 		if ((ffs_flags & FFSV_REPLACE) == 0 &&
1849 		    ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1850 		    !VN_IS_DOOMED(*vpp)))
1851 			return (0);
1852 		vgone(*vpp);
1853 		vput(*vpp);
1854 	}
1855 
1856 	/*
1857 	 * We must promote to an exclusive lock for vnode creation.  A
1858 	 * shared lock request can arrive here if lookup is passed LOCKSHARED.
1859 	 */
1860 	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1861 		flags &= ~LK_TYPE_MASK;
1862 		flags |= LK_EXCLUSIVE;
1863 	}
1864 
1865 	/*
1866 	 * We do not lock vnode creation as it is believed to be too
1867 	 * expensive for such a rare case as simultaneous creation of a
1868 	 * vnode for the same ino by different processes. We just let
1869 	 * them race and check later to decide who wins. Let the race begin!
1870 	 */
1871 
1872 	ump = VFSTOUFS(mp);
1873 	fs = ump->um_fs;
1874 	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);
1875 
1876 	/* Allocate a new vnode/inode. */
1877 	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1878 	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1879 	if (error) {
1880 		*vpp = NULL;
1881 		uma_zfree_smr(uma_inode, ip);
1882 		return (error);
1883 	}
1884 	/*
1885 	 * FFS supports recursive locking.
1886 	 */
1887 	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1888 	VN_LOCK_AREC(vp);
1889 	vp->v_data = ip;
1890 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
1891 	ip->i_vnode = vp;
1892 	ip->i_ump = ump;
1893 	ip->i_number = ino;
1894 	ip->i_ea_refs = 0;
1895 	ip->i_nextclustercg = -1;
1896 	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1897 	ip->i_mode = 0; /* ensure error cases below throw away vnode */
1898 	cluster_init_vn(&ip->i_clusterw);
1899 #ifdef DIAGNOSTIC
1900 	ufs_init_trackers(ip);
1901 #endif
1902 #ifdef QUOTA
1903 	{
1904 		int i;
1905 		for (i = 0; i < MAXQUOTAS; i++)
1906 			ip->i_dquot[i] = NODQUOT;
1907 	}
1908 #endif
1909 
1910 	if (ffs_flags & FFSV_FORCEINSMQ)
1911 		vp->v_vflag |= VV_FORCEINSMQ;
1912 	error = insmntque(vp, mp);
1913 	if (error != 0) {
1914 		uma_zfree_smr(uma_inode, ip);
1915 		*vpp = NULL;
1916 		return (error);
1917 	}
1918 	vp->v_vflag &= ~VV_FORCEINSMQ;
1919 	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1920 	if (error != 0)
1921 		return (error);
1922 	if (*vpp != NULL) {
1923 		/*
1924 		 * Calls from ffs_valloc() (i.e., with FFSV_REPLACE set)
1925 		 * operate on an empty inode, which must not be found by
1926 		 * other threads until it is fully filled in.  The vnode
1927 		 * for the empty inode must not be re-inserted into the
1928 		 * hash by another thread after we removed it at the beginning.
1929 		 */
1930 		MPASS((ffs_flags & FFSV_REPLACE) == 0);
1931 		return (0);
1932 	}
1933 	if (I_IS_UFS1(ip))
1934 		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1935 	else
1936 		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1937 
1938 	if ((ffs_flags & FFSV_NEWINODE) != 0) {
1939 		/* New inode, just zero out its contents. */
1940 		if (I_IS_UFS1(ip))
1941 			memset(ip->i_din1, 0, sizeof(struct ufs1_dinode));
1942 		else
1943 			memset(ip->i_din2, 0, sizeof(struct ufs2_dinode));
1944 	} else {
1945 		/* Read the disk contents for the inode, copy into the inode. */
1946 		dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
1947 		error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
1948 		    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
1949 		if (error != 0) {
1950 			/*
1951 			 * The inode does not contain anything useful, so it
1952 			 * would be misleading to leave it on its hash chain.
1953 			 * With mode still zero, it will be unlinked and
1954 			 * returned to the free list by vput().
1955 			 */
1956 			vgone(vp);
1957 			vput(vp);
1958 			*vpp = NULL;
1959 			return (error);
1960 		}
1961 		if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
1962 			bqrelse(bp);
1963 			vgone(vp);
1964 			vput(vp);
1965 			*vpp = NULL;
1966 			return (error);
1967 		}
1968 		bqrelse(bp);
1969 	}
1970 	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
1971 	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
1972 		softdep_load_inodeblock(ip);
1973 	else
1974 		ip->i_effnlink = ip->i_nlink;
1975 
1976 	/*
1977 	 * Initialize the vnode from the inode, check for aliases.
1978 	 * Note that the underlying vnode may have changed.
1979 	 */
1980 	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
1981 	    &vp);
1982 	if (error) {
1983 		vgone(vp);
1984 		vput(vp);
1985 		*vpp = NULL;
1986 		return (error);
1987 	}
1988 
1989 	/*
1990 	 * Finish inode initialization.
1991 	 */
1992 	if (vp->v_type != VFIFO) {
1993 		/* FFS supports shared locking for all files except fifos. */
1994 		VN_LOCK_ASHARE(vp);
1995 	}
1996 
1997 	/*
1998 	 * Set up a generation number for this inode if it does not
1999 	 * already have one. This should only happen on old filesystems.
2000 	 */
2001 	if (ip->i_gen == 0) {
2002 		while (ip->i_gen == 0)
2003 			ip->i_gen = arc4random();
2004 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
2005 			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
2006 			DIP_SET(ip, i_gen, ip->i_gen);
2007 		}
2008 	}
2009 #ifdef MAC
2010 	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
2011 		/*
2012 		 * If this vnode is already allocated, and we're running
2013 		 * multi-label, attempt to perform a label association
2014 		 * from the extended attributes on the inode.
2015 		 */
2016 		error = mac_vnode_associate_extattr(mp, vp);
2017 		if (error) {
2018 			/* ufs_inactive will release ip->i_devvp ref. */
2019 			vgone(vp);
2020 			vput(vp);
2021 			*vpp = NULL;
2022 			return (error);
2023 		}
2024 	}
2025 #endif
2026 
2027 	vn_set_state(vp, VSTATE_CONSTRUCTED);
2028 	*vpp = vp;
2029 	return (0);
2030 }
2031 
2032 /*
2033  * File handle to vnode
2034  *
2035  * Have to be really careful about stale file handles:
2036  * - check that the inode number is valid
2037  * - for UFS2 check that the inode number is initialized
2038  * - call ffs_vget() to get the locked inode
2039  * - check for an unallocated inode (i_mode == 0)
2040  * - check that the given client host has export rights and return
2041  *   those rights via exflagsp and credanonp
2042  */
2043 static int
2044 ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
2045 {
2046 	struct ufid *ufhp;
2047 
2048 	ufhp = (struct ufid *)fhp;
2049 	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
2050 	    vpp, 0));
2051 }
2052 
2053 /*
2054  * Return a vnode from a mounted filesystem for the inode with the
2055  * specified generation number. Return ESTALE if an inode with the
2056  * given generation number no longer exists on that filesystem.
2057  */
2058 int
2059 ffs_inotovp(struct mount *mp,
2060 	ino_t ino,
2061 	uint64_t gen,
2062 	int lflags,
2063 	struct vnode **vpp,
2064 	int ffs_flags)
2065 {
2066 	struct ufsmount *ump;
2067 	struct vnode *nvp;
2068 	struct inode *ip;
2069 	struct fs *fs;
2070 	struct cg *cgp;
2071 	struct buf *bp;
2072 	uint64_t cg;
2073 
2074 	ump = VFSTOUFS(mp);
2075 	fs = ump->um_fs;
2076 	*vpp = NULL;
2077 
2078 	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
2079 		return (ESTALE);
2080 
2081 	/*
2082 	 * We need to check whether the inode is initialized: UFS2 does
2083 	 * lazy initialization and nfs_fhtovp can offer arbitrary inode numbers.
2084 	 */
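	/*
	 * For example, with fs_ipg == 32768, inode 100000 lives in
	 * cylinder group 3; if that group has cg_initediblk == 1024,
	 * any inode number at or beyond 3 * 32768 + 1024 == 99328
	 * cannot exist yet, so the handle must be stale.
	 */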
2085 	if (fs->fs_magic == FS_UFS2_MAGIC) {
2086 		cg = ino_to_cg(fs, ino);
2087 		if (ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp) != 0)
2088 			return (ESTALE);
2089 		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
2090 			brelse(bp);
2091 			return (ESTALE);
2092 		}
2093 		brelse(bp);
2094 	}
2095 
2096 	if (ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags) != 0)
2097 		return (ESTALE);
2098 
2099 	ip = VTOI(nvp);
2100 	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
2101 		if (ip->i_mode == 0)
2102 			vgone(nvp);
2103 		vput(nvp);
2104 		return (ESTALE);
2105 	}
2106 
2107 	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
2108 	*vpp = nvp;
2109 	return (0);
2110 }
2111 
2112 /*
2113  * Initialize the filesystem.
2114  */
2115 static int
2116 ffs_init(struct vfsconf *vfsp)
2117 {
2118 
2119 	ffs_susp_initialize();
2120 	softdep_initialize();
2121 	return (ufs_init(vfsp));
2122 }
2123 
2124 /*
2125  * Undo the work of ffs_init().
2126  */
2127 static int
2128 ffs_uninit(struct vfsconf *vfsp)
2129 {
2130 	int ret;
2131 
2132 	ret = ufs_uninit(vfsp);
2133 	softdep_uninitialize();
2134 	ffs_susp_uninitialize();
2135 	taskqueue_drain_all(taskqueue_thread);
2136 	return (ret);
2137 }
2138 
2139 /*
2140  * Structure used to pass information from ffs_sbupdate to its
2141  * helper routine ffs_use_bwrite.
2142  */
2143 struct devfd {
2144 	struct ufsmount	*ump;
2145 	struct buf	*sbbp;
2146 	int		 waitfor;
2147 	int		 suspended;
2148 	int		 error;
2149 };
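
/*
 * ffs_sbput() (in ffs_subr.c) writes out the superblock and its
 * summary information by calling back into the write function handed
 * to it; the devfd cookie carries the state that ffs_use_bwrite()
 * needs.
 */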
2150 
2151 /*
2152  * Write a superblock and associated information back to disk.
2153  */
2154 int
2155 ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
2156 {
2157 	struct fs *fs;
2158 	struct buf *sbbp;
2159 	struct devfd devfd;
2160 
2161 	fs = ump->um_fs;
2162 	if (fs->fs_ronly == 1 &&
2163 	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
2164 	    (MNT_RDONLY | MNT_UPDATE))
2165 		panic("ffs_sbupdate: write read-only filesystem");
2166 	/*
2167 	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
2168 	 */
2169 	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
2170 	    (int)fs->fs_sbsize, 0, 0, 0);
2171 	/*
2172 	 * Initialize info needed for write function.
2173 	 */
2174 	devfd.ump = ump;
2175 	devfd.sbbp = sbbp;
2176 	devfd.waitfor = waitfor;
2177 	devfd.suspended = suspended;
2178 	devfd.error = 0;
2179 	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
2180 }
2181 
2182 /*
2183  * Write function for use by filesystem-layer routines.
2184  */
2185 static int
2186 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2187 {
2188 	struct devfd *devfdp;
2189 	struct ufsmount *ump;
2190 	struct buf *bp;
2191 	struct fs *fs;
2192 	int error;
2193 
2194 	devfdp = devfd;
2195 	ump = devfdp->ump;
2196 	fs = ump->um_fs;
2197 	/*
2198 	 * Writing the superblock summary information.
2199 	 */
2200 	if (loc != fs->fs_sblockloc) {
2201 		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2202 		bcopy(buf, bp->b_data, (uint64_t)size);
2203 		if (devfdp->suspended)
2204 			bp->b_flags |= B_VALIDSUSPWRT;
2205 		if (devfdp->waitfor != MNT_WAIT)
2206 			bawrite(bp);
2207 		else if ((error = bwrite(bp)) != 0)
2208 			devfdp->error = error;
2209 		return (0);
2210 	}
2211 	/*
2212 	 * Writing the superblock itself. We need to do special checks for it.
2213 	 */
2214 	bp = devfdp->sbbp;
2215 	if (ffs_fsfail_cleanup(ump, devfdp->error))
2216 		devfdp->error = 0;
2217 	if (devfdp->error != 0) {
2218 		brelse(bp);
2219 		return (devfdp->error);
2220 	}
2221 	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
2222 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2223 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2224 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
2225 		fs->fs_sblockloc = SBLOCK_UFS1;
2226 	}
2227 	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
2228 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2229 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2230 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
2231 		fs->fs_sblockloc = SBLOCK_UFS2;
2232 	}
2233 	if (MOUNTEDSOFTDEP(ump->um_mountp))
2234 		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
2235 	UFS_LOCK(ump);
2236 	bcopy((caddr_t)fs, bp->b_data, (uint64_t)fs->fs_sbsize);
2237 	UFS_UNLOCK(ump);
2238 	fs = (struct fs *)bp->b_data;
2239 	fs->fs_fmod = 0;
2240 	ffs_oldfscompat_write(fs, ump);
2241 	fs->fs_si = NULL;
2242 	/* Recalculate the superblock hash */
2243 	fs->fs_ckhash = ffs_calc_sbhash(fs);
2244 	if (devfdp->suspended)
2245 		bp->b_flags |= B_VALIDSUSPWRT;
2246 	if (devfdp->waitfor != MNT_WAIT)
2247 		bawrite(bp);
2248 	else if ((error = bwrite(bp)) != 0)
2249 		devfdp->error = error;
2250 	return (devfdp->error);
2251 }
2252 
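/*
 * Extended attribute control: dispatch to the UFS extended attribute
 * implementation when UFS_EXTATTR is compiled in, otherwise fall back
 * to the stock VFS handler.
 */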
2253 static int
2254 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
2255 	int attrnamespace, const char *attrname)
2256 {
2257 
2258 #ifdef UFS_EXTATTR
2259 	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
2260 	    attrname));
2261 #else
2262 	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
2263 	    attrname));
2264 #endif
2265 }
2266 
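/*
 * Release an in-core inode: free its dinode (UFS1 or UFS2) and return
 * the inode itself to the SMR-protected inode zone.
 */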
2267 static void
2268 ffs_ifree(struct ufsmount *ump, struct inode *ip)
2269 {
2270 
2271 	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
2272 		uma_zfree(uma_ufs1, ip->i_din1);
2273 	else if (ip->i_din2 != NULL)
2274 		uma_zfree(uma_ufs2, ip->i_din2);
2275 	uma_zfree_smr(uma_inode, ip);
2276 }
2277 
2278 static int dobkgrdwrite = 1;
2279 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
2280     "Do background writes (honoring the BV_BKGRDWRITE flag)?");
2281 
2282 /*
2283  * Complete a background write started from bwrite.
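 *
 * The copy made by ffs_bufwrite() was written asynchronously; here we
 * hand any unfinished dependencies back to the original buffer, note
 * write errors via BV_BKGRDERR, and wake anyone waiting in
 * ffs_bufwrite() for the background write to finish.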
2284  */
2285 static void
2286 ffs_backgroundwritedone(struct buf *bp)
2287 {
2288 	struct bufobj *bufobj;
2289 	struct buf *origbp;
2290 
2291 #ifdef SOFTUPDATES
2292 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
2293 		softdep_handle_error(bp);
2294 #endif
2295 
2296 	/*
2297 	 * Find the original buffer that we are writing.
2298 	 */
2299 	bufobj = bp->b_bufobj;
2300 	BO_LOCK(bufobj);
2301 	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2302 		panic("backgroundwritedone: lost buffer");
2303 
2304 	/*
2305 	 * Mark the cylinder group buffer origbp as dirty so as not
2306 	 * to lose the failed write.
2307 	 */
2308 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2309 		origbp->b_vflags |= BV_BKGRDERR;
2310 	BO_UNLOCK(bufobj);
2311 	/*
2312 	 * Process dependencies then return any unfinished ones.
2313 	 */
2314 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
2315 		buf_complete(bp);
2316 #ifdef SOFTUPDATES
2317 	if (!LIST_EMPTY(&bp->b_dep))
2318 		softdep_move_dependencies(bp, origbp);
2319 #endif
2320 	/*
2321 	 * This buffer is marked B_NOCACHE so when it is released
2322 	 * by biodone it will be tossed.  Clear B_IOSTARTED in case of error.
2323 	 */
2324 	bp->b_flags |= B_NOCACHE;
2325 	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
2326 	pbrelvp(bp);
2327 
2328 	/*
2329 	 * Prevent brelse() from trying to keep and re-dirty bp on
2330 	 * error: doing so would dereference b_bufobj in
2331 	 * bdirty()/reassignbuf(), but b_bufobj was cleared by
2332 	 * pbrelvp() above.
2333 	 */
2334 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2335 		bp->b_flags |= B_INVAL;
2336 	bufdone(bp);
2337 	BO_LOCK(bufobj);
2338 	/*
2339 	 * Clear the BV_BKGRDINPROG flag in the original buffer
2340 	 * and awaken it if it is waiting for the write to complete.
2341 	 * If BV_BKGRDINPROG is not set in the original buffer it must
2342 	 * have been released and re-instantiated, which is not legal.
2343 	 */
2344 	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2345 	    ("backgroundwritedone: lost buffer2"));
2346 	origbp->b_vflags &= ~BV_BKGRDINPROG;
2347 	if (origbp->b_vflags & BV_BKGRDWAIT) {
2348 		origbp->b_vflags &= ~BV_BKGRDWAIT;
2349 		wakeup(&origbp->b_xflags);
2350 	}
2351 	BO_UNLOCK(bufobj);
2352 }
2353 
2354 /*
2355  * Write, release buffer on completion.  (Done by iodone
2356  * if async).  Do not bother writing anything if the buffer
2357  * is invalid.
2358  *
2359  * Note that we set B_CACHE here, indicating that buffer is
2360  * fully valid and thus cacheable.  This is true even of NFS
2361  * now so we set it generally.  This could be set either here
2362  * or in biodone() since the I/O is synchronous.  We put it
2363  * here.
2364  */
2365 static int
2366 ffs_bufwrite(struct buf *bp)
2367 {
2368 	struct buf *newbp;
2369 	struct cg *cgp;
2370 
2371 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2372 	if (bp->b_flags & B_INVAL) {
2373 		brelse(bp);
2374 		return (0);
2375 	}
2376 
2377 	if (!BUF_ISLOCKED(bp))
2378 		panic("bufwrite: buffer is not busy???");
2379 	/*
2380 	 * If a background write is already in progress, delay
2381 	 * writing this block if it is asynchronous. Otherwise
2382 	 * wait for the background write to complete.
2383 	 */
2384 	BO_LOCK(bp->b_bufobj);
2385 	if (bp->b_vflags & BV_BKGRDINPROG) {
2386 		if (bp->b_flags & B_ASYNC) {
2387 			BO_UNLOCK(bp->b_bufobj);
2388 			bdwrite(bp);
2389 			return (0);
2390 		}
2391 		bp->b_vflags |= BV_BKGRDWAIT;
2392 		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2393 		    "bwrbg", 0);
2394 		if (bp->b_vflags & BV_BKGRDINPROG)
2395 			panic("bufwrite: still writing");
2396 	}
2397 	bp->b_vflags &= ~BV_BKGRDERR;
2398 	BO_UNLOCK(bp->b_bufobj);
2399 
2400 	/*
2401 	 * If this buffer is marked for background writing and we
2402 	 * do not have to wait for it, make a copy and write the
2403 	 * copy so as to leave this buffer ready for further use.
2404 	 *
2405 	 * This optimization eats a lot of memory.  If we have a page
2406 	 * or buffer shortfall we can't do it.
2407 	 */
2408 	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2409 	    (bp->b_flags & B_ASYNC) &&
2410 	    !vm_page_count_severe() &&
2411 	    !buf_dirty_count_severe()) {
2412 		KASSERT(bp->b_iodone == NULL,
2413 		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2414 
2415 		/* get a new block */
2416 		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2417 		if (newbp == NULL)
2418 			goto normal_write;
2419 
2420 		KASSERT(buf_mapped(bp), ("Unmapped cg"));
2421 		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2422 		BO_LOCK(bp->b_bufobj);
2423 		bp->b_vflags |= BV_BKGRDINPROG;
2424 		BO_UNLOCK(bp->b_bufobj);
2425 		newbp->b_xflags |=
2426 		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
2427 		newbp->b_lblkno = bp->b_lblkno;
2428 		newbp->b_blkno = bp->b_blkno;
2429 		newbp->b_offset = bp->b_offset;
2430 		newbp->b_iodone = ffs_backgroundwritedone;
2431 		newbp->b_flags |= B_ASYNC;
2432 		newbp->b_flags &= ~B_INVAL;
2433 		pbgetvp(bp->b_vp, newbp);
2434 
2435 #ifdef SOFTUPDATES
2436 		/*
2437 		 * Move over the dependencies.  If there are rollbacks,
2438 		 * leave the parent buffer dirtied as it will need to
2439 		 * be written again.
2440 		 */
2441 		if (LIST_EMPTY(&bp->b_dep) ||
2442 		    softdep_move_dependencies(bp, newbp) == 0)
2443 			bundirty(bp);
2444 #else
2445 		bundirty(bp);
2446 #endif
2447 
2448 		/*
2449 		 * Initiate write on the copy, release the original.  The
2450 		 * BKGRDINPROG flag prevents it from going away until
2451 		 * the background write completes. We have to recalculate
2452 		 * its check hash in case the buffer gets freed and then
2453 		 * reconstituted from the buffer cache during a later read.
2454 		 */
2455 		if ((bp->b_xflags & BX_CYLGRP) != 0) {
2456 			cgp = (struct cg *)bp->b_data;
2457 			cgp->cg_ckhash = 0;
2458 			cgp->cg_ckhash =
2459 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2460 		}
2461 		bqrelse(bp);
2462 		bp = newbp;
2463 	} else
2464 		/* Mark the buffer clean */
2465 		bundirty(bp);
2466 
2467 	/* Let the normal bufwrite do the rest for us */
2468 normal_write:
2469 	/*
2470 	 * If we are writing a cylinder group, update its time.
2471 	 */
2472 	if ((bp->b_xflags & BX_CYLGRP) != 0) {
2473 		cgp = (struct cg *)bp->b_data;
2474 		cgp->cg_old_time = cgp->cg_time = time_second;
2475 	}
2476 	return (bufwrite(bp));
2477 }
2478 
2479 static void
2480 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2481 {
2482 	struct vnode *vp;
2483 	struct buf *tbp;
2484 	int error, nocopy;
2485 
2486 	/*
2487 	 * This is the bufobj strategy for the private VCHR vnodes
2488 	 * used by FFS to access the underlying storage device.
2489 	 * We override the default bufobj strategy and thus bypass
2490 	 * VOP_STRATEGY() for these vnodes.
2491 	 */
2492 	vp = bo2vnode(bo);
2493 	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
2494 	    bp->b_vp->v_rdev == NULL ||
2495 	    bp->b_vp->v_rdev->si_mountpt == NULL ||
2496 	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
2497 	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
2498 	    ("ffs_geom_strategy() with wrong vp"));
2499 	if (bp->b_iocmd == BIO_WRITE) {
2500 		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2501 		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2502 		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2503 			panic("ffs_geom_strategy: bad I/O");
2504 		nocopy = bp->b_flags & B_NOCOPY;
2505 		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2506 		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2507 		    vp->v_rdev->si_snapdata != NULL) {
2508 			if ((bp->b_flags & B_CLUSTER) != 0) {
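				/*
				 * Temporarily drop this cluster's
				 * runningbufspace accounting while the
				 * snapshot copy-on-write code may issue
				 * its own I/O; it is restored before
				 * the write is dispatched below.
				 */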
2509 				runningbufwakeup(bp);
2510 				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2511 					      b_cluster.cluster_entry) {
2512 					error = ffs_copyonwrite(vp, tbp);
2513 					if (error != 0 &&
2514 					    error != EOPNOTSUPP) {
2515 						bp->b_error = error;
2516 						bp->b_ioflags |= BIO_ERROR;
2517 						bp->b_flags &= ~B_BARRIER;
2518 						bufdone(bp);
2519 						return;
2520 					}
2521 				}
2522 				bp->b_runningbufspace = bp->b_bufsize;
2523 				atomic_add_long(&runningbufspace,
2524 					       bp->b_runningbufspace);
2525 			} else {
2526 				error = ffs_copyonwrite(vp, bp);
2527 				if (error != 0 && error != EOPNOTSUPP) {
2528 					bp->b_error = error;
2529 					bp->b_ioflags |= BIO_ERROR;
2530 					bp->b_flags &= ~B_BARRIER;
2531 					bufdone(bp);
2532 					return;
2533 				}
2534 			}
2535 		}
2536 #ifdef SOFTUPDATES
2537 		if ((bp->b_flags & B_CLUSTER) != 0) {
2538 			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2539 				      b_cluster.cluster_entry) {
2540 				if (!LIST_EMPTY(&tbp->b_dep))
2541 					buf_start(tbp);
2542 			}
2543 		} else {
2544 			if (!LIST_EMPTY(&bp->b_dep))
2545 				buf_start(bp);
2546 		}
2547 
2548 #endif
2549 		/*
2550 		 * Check for metadata that needs check-hashes and update them.
2551 		 */
2552 		switch (bp->b_xflags & BX_FSPRIV) {
2553 		case BX_CYLGRP:
2554 			((struct cg *)bp->b_data)->cg_ckhash = 0;
2555 			((struct cg *)bp->b_data)->cg_ckhash =
2556 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2557 			break;
2558 
2559 		case BX_SUPERBLOCK:
2560 		case BX_INODE:
2561 		case BX_INDIR:
2562 		case BX_DIR:
2563 			printf("Check-hash write is unimplemented!!!\n");
2564 			break;
2565 
2566 		case 0:
2567 			break;
2568 
2569 		default:
2570 			printf("multiple buffer types 0x%b\n",
2571 			    (bp->b_xflags & BX_FSPRIV), PRINT_UFS_BUF_XFLAGS);
2572 			break;
2573 		}
2574 	}
2575 	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
2576 		bp->b_xflags |= BX_CVTENXIO;
2577 	g_vfs_strategy(bo, bp);
2578 }
2579 
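/*
 * Return non-zero if the given mount point is serviced by this
 * filesystem, i.e., if its operations vector is ufs_vfsops.
 */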
2580 int
2581 ffs_own_mount(const struct mount *mp)
2582 {
2583 
2584 	if (mp->mnt_op == &ufs_vfsops)
2585 		return (1);
2586 	return (0);
2587 }
2588 
2589 #ifdef	DDB
2590 #ifdef SOFTUPDATES
2591 
2592 /* defined in ffs_softdep.c */
2593 extern void db_print_ffs(struct ufsmount *ump);
2594 
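/*
 * DDB command: "show ffs <addr>" prints the state of the ufsmount at
 * the given struct mount address; with no address, all mounted UFS
 * filesystems are shown.
 */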
2595 DB_SHOW_COMMAND(ffs, db_show_ffs)
2596 {
2597 	struct mount *mp;
2598 	struct ufsmount *ump;
2599 
2600 	if (have_addr) {
2601 		ump = VFSTOUFS((struct mount *)addr);
2602 		db_print_ffs(ump);
2603 		return;
2604 	}
2605 
2606 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2607 		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2608 			db_print_ffs(VFSTOUFS(mp));
2609 	}
2610 }
2611 
2612 #endif	/* SOFTUPDATES */
2613 #endif	/* DDB */
2614