xref: /freebsd/sys/ufs/ffs/ffs_vfsops.c (revision 661ca921e8cd56b17fc6615bc7e596e56e0e7c31)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1989, 1991, 1993, 1994
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 #include "opt_quota.h"
34 #include "opt_ufs.h"
35 #include "opt_ffs.h"
36 #include "opt_ddb.h"
37 
38 #include <sys/param.h>
39 #include <sys/gsb_crc32.h>
40 #include <sys/systm.h>
41 #include <sys/namei.h>
42 #include <sys/priv.h>
43 #include <sys/proc.h>
44 #include <sys/taskqueue.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/vnode.h>
48 #include <sys/mount.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/fcntl.h>
53 #include <sys/ioccom.h>
54 #include <sys/malloc.h>
55 #include <sys/mutex.h>
56 #include <sys/rwlock.h>
57 #include <sys/sysctl.h>
58 #include <sys/vmmeter.h>
59 
60 #include <security/mac/mac_framework.h>
61 
62 #include <ufs/ufs/dir.h>
63 #include <ufs/ufs/extattr.h>
64 #include <ufs/ufs/gjournal.h>
65 #include <ufs/ufs/quota.h>
66 #include <ufs/ufs/ufsmount.h>
67 #include <ufs/ufs/inode.h>
68 #include <ufs/ufs/ufs_extern.h>
69 
70 #include <ufs/ffs/fs.h>
71 #include <ufs/ffs/ffs_extern.h>
72 
73 #include <vm/vm.h>
74 #include <vm/uma.h>
75 #include <vm/vm_page.h>
76 
77 #include <geom/geom.h>
78 #include <geom/geom_vfs.h>
79 
80 #include <ddb/ddb.h>
81 
82 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
83 VFS_SMR_DECLARE;
84 
85 static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
86 static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
87 static int	ffs_sync_lazy(struct mount *mp);
88 static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
89 static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);
90 
91 static vfs_init_t ffs_init;
92 static vfs_uninit_t ffs_uninit;
93 static vfs_extattrctl_t ffs_extattrctl;
94 static vfs_cmount_t ffs_cmount;
95 static vfs_unmount_t ffs_unmount;
96 static vfs_mount_t ffs_mount;
97 static vfs_statfs_t ffs_statfs;
98 static vfs_fhtovp_t ffs_fhtovp;
99 static vfs_sync_t ffs_sync;
100 
101 static struct vfsops ufs_vfsops = {
102 	.vfs_extattrctl =	ffs_extattrctl,
103 	.vfs_fhtovp =		ffs_fhtovp,
104 	.vfs_init =		ffs_init,
105 	.vfs_mount =		ffs_mount,
106 	.vfs_cmount =		ffs_cmount,
107 	.vfs_quotactl =		ufs_quotactl,
108 	.vfs_root =		vfs_cache_root,
109 	.vfs_cachedroot =	ufs_root,
110 	.vfs_statfs =		ffs_statfs,
111 	.vfs_sync =		ffs_sync,
112 	.vfs_uninit =		ffs_uninit,
113 	.vfs_unmount =		ffs_unmount,
114 	.vfs_vget =		ffs_vget,
115 	.vfs_susp_clean =	process_deferred_inactive,
116 };
117 
118 VFS_SET(ufs_vfsops, ufs, VFCF_FILEREVINC);
119 MODULE_VERSION(ufs, 1);
120 
121 static b_strategy_t ffs_geom_strategy;
122 static b_write_t ffs_bufwrite;
123 
124 static struct buf_ops ffs_ops = {
125 	.bop_name =	"FFS",
126 	.bop_write =	ffs_bufwrite,
127 	.bop_strategy =	ffs_geom_strategy,
128 	.bop_sync =	bufsync,
129 #ifdef NO_FFS_SNAPSHOT
130 	.bop_bdflush =	bufbdflush,
131 #else
132 	.bop_bdflush =	ffs_bdflush,
133 #endif
134 };
135 
136 /*
137  * Note that userquota and groupquota options are not currently used
138  * by UFS/FFS code and generally mount(8) does not pass those options
139  * from userland, but they can be passed by loader(8) via
140  * vfs.root.mountfrom.options.
141  */
142 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
143     "noclusterw", "noexec", "export", "force", "from", "groupquota",
144     "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
145     "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };
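
/*
 * Illustrative only: most of these options arrive via mount(8), e.g.
 *
 *	mount -o noatime,acls /dev/ada0p2 /mnt
 *
 * (device and mount point are hypothetical), while the quota options
 * can be supplied by loader(8) in loader.conf:
 *
 *	vfs.root.mountfrom.options="rw,userquota"
 */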
146 
147 static int ffs_enxio_enable = 1;
148 SYSCTL_DECL(_vfs_ffs);
149 SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
150     &ffs_enxio_enable, 0,
151     "enable mapping of other disk I/O errors to ENXIO");
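
/*
 * Because the knob above is CTLFLAG_RWTUN, it can be set either as a
 * loader tunable or at runtime, e.g. (illustrative):
 *
 *	sysctl vfs.ffs.enxio_enable=0
 */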
152 
153 /*
154  * Return a buffer with the contents of block "offset" from the beginning
155  * of directory vnode "vp".  If "res" is non-NULL, fill it in with a
156  * pointer to the remaining space in the directory block.
157  */
158 static int
159 ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
160 {
161 	struct inode *ip;
162 	struct fs *fs;
163 	struct buf *bp;
164 	ufs_lbn_t lbn;
165 	int bsize, error;
166 
167 	ip = VTOI(vp);
168 	fs = ITOFS(ip);
169 	lbn = lblkno(fs, offset);
170 	bsize = blksize(fs, ip, lbn);
171 
172 	*bpp = NULL;
173 	error = bread(vp, lbn, bsize, NOCRED, &bp);
174 	if (error) {
175 		return (error);
176 	}
177 	if (res)
178 		*res = (char *)bp->b_data + blkoff(fs, offset);
179 	*bpp = bp;
180 	return (0);
181 }
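
/*
 * Worked example of the arithmetic above (hypothetical geometry): with
 * a 32768-byte block size, offset 40000 maps to logical block
 * lblkno() == 1 and blkoff() == 40000 - 32768 == 7232, so "*res" ends
 * up pointing 7232 bytes into the returned buffer.
 */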
182 
183 /*
184  * Load up the contents of an inode and copy the appropriate pieces
185  * to the incore copy.
186  */
187 static int
188 ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
189 {
190 	struct ufs1_dinode *dip1;
191 	struct ufs2_dinode *dip2;
192 	int error;
193 
194 	if (I_IS_UFS1(ip)) {
195 		dip1 = ip->i_din1;
196 		*dip1 =
197 		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
198 		ip->i_mode = dip1->di_mode;
199 		ip->i_nlink = dip1->di_nlink;
200 		ip->i_effnlink = dip1->di_nlink;
201 		ip->i_size = dip1->di_size;
202 		ip->i_flags = dip1->di_flags;
203 		ip->i_gen = dip1->di_gen;
204 		ip->i_uid = dip1->di_uid;
205 		ip->i_gid = dip1->di_gid;
206 		return (0);
207 	}
208 	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
209 	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
210 	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
211 		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
212 		    (intmax_t)ino);
213 		return (error);
214 	}
215 	*ip->i_din2 = *dip2;
216 	dip2 = ip->i_din2;
217 	ip->i_mode = dip2->di_mode;
218 	ip->i_nlink = dip2->di_nlink;
219 	ip->i_effnlink = dip2->di_nlink;
220 	ip->i_size = dip2->di_size;
221 	ip->i_flags = dip2->di_flags;
222 	ip->i_gen = dip2->di_gen;
223 	ip->i_uid = dip2->di_uid;
224 	ip->i_gid = dip2->di_gid;
225 	return (0);
226 }
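
/*
 * A minimal sketch of the calling convention, modeled on ffs_reload()
 * below; the caller reads the inode block and releases it afterwards:
 *
 *	if ((error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
 *	    (int)fs->fs_bsize, NOCRED, &bp)) != 0)
 *		return (error);
 *	error = ffs_load_inode(bp, ip, fs, ino);
 *	brelse(bp);
 */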
227 
228 /*
229  * Verify that a filesystem block number is a valid data block.
230  * This routine is only called on untrusted filesystems.
231  */
232 static int
233 ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
234 {
235 	struct fs *fs;
236 	struct ufsmount *ump;
237 	ufs2_daddr_t end_daddr;
238 	int cg, havemtx;
239 
240 	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
241 	    ("ffs_check_blkno called on a trusted file system"));
242 	ump = VFSTOUFS(mp);
243 	fs = ump->um_fs;
244 	cg = dtog(fs, daddr);
245 	end_daddr = daddr + numfrags(fs, blksize);
246 	/*
247 	 * Verify that the block number is a valid data block. Also check
248 	 * that it does not point to an inode block or a superblock. Accept
249 	 * blocks that are unallocated (0) or part of snapshot metadata
250 	 * (BLK_NOCOPY or BLK_SNAP).
251 	 *
252 	 * Thus, the block must be in a valid range for the filesystem and
253 	 * either in the space before a backup superblock (except the first
254 	 * cylinder group where that space is used by the bootstrap code) or
255 	 * after the inode blocks and before the end of the cylinder group.
256 	 */
257 	if ((uint64_t)daddr <= BLK_SNAP ||
258 	    ((uint64_t)end_daddr <= fs->fs_size &&
259 	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
260 	    (daddr >= cgdmin(fs, cg) &&
261 	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
262 		return (0);
263 	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
264 		UFS_LOCK(ump);
265 	if (ppsratecheck(&ump->um_last_integritymsg,
266 	    &ump->um_secs_integritymsg, 1)) {
267 		UFS_UNLOCK(ump);
268 		uprintf("\n%s: inode %jd, out-of-range indirect block "
269 		    "number %jd\n", mp->mnt_stat.f_mntonname, (intmax_t)inum, (intmax_t)daddr);
270 		if (havemtx)
271 			UFS_LOCK(ump);
272 	} else if (!havemtx)
273 		UFS_UNLOCK(ump);
274 	return (EINTEGRITY);
275 }
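
/*
 * Illustration with made-up numbers: if cgsblock(fs, cg) == 3960,
 * cgdmin(fs, cg) == 4000 and cgbase(fs, cg) + fs_fpg == 12000, then a
 * fragment run 5000..5007 is accepted as data, while 3970..3977 lands
 * in the superblock/inode area and is rejected with EINTEGRITY.
 */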
276 
277 /*
278  * On first ENXIO error, initiate an asynchronous forcible unmount.
279  * Used to unmount filesystems whose underlying media has gone away.
280  *
281  * Return true if a cleanup is in progress.
282  */
283 int
284 ffs_fsfail_cleanup(struct ufsmount *ump, int error)
285 {
286 	int retval;
287 
288 	UFS_LOCK(ump);
289 	retval = ffs_fsfail_cleanup_locked(ump, error);
290 	UFS_UNLOCK(ump);
291 	return (retval);
292 }
293 
294 int
295 ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
296 {
297 	mtx_assert(UFS_MTX(ump), MA_OWNED);
298 	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
299 		ump->um_flags |= UM_FSFAIL_CLEANUP;
300 		if (ump->um_mountp == rootvnode->v_mount)
301 			panic("UFS: root fs would be forcibly unmounted");
302 
303 		/*
304 		 * Queue an async forced unmount.
305 		 */
306 		vfs_ref(ump->um_mountp);
307 		dounmount(ump->um_mountp,
308 		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
309 		printf("UFS: forcibly unmounting %s from %s\n",
310 		    ump->um_mountp->mnt_stat.f_mntfromname,
311 		    ump->um_mountp->mnt_stat.f_mntonname);
312 	}
313 	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
314 }
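
/*
 * Callers typically use the return value to decide whether an I/O error
 * can be ignored because a forced unmount is already tearing the
 * filesystem down, as ffs_unmount() does below:
 *
 *	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
 *		goto fail;
 */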
315 
316 /*
317  * Wrapper used during ENXIO cleanup to allocate empty buffers when
318  * the kernel is unable to read the real ones.  They are needed so that
319  * the soft updates code can use them to unwind its dependencies.
320  */
321 int
322 ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
323     daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
324     struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
325     struct buf **bpp)
326 {
327 	int error;
328 
329 	flags |= GB_CVTENXIO;
330 	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
331 	    cred, flags, ckhashfunc, bpp);
332 	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
333 		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
334 		KASSERT(error == 0, ("getblkx failed"));
335 		vfs_bio_bzero_buf(*bpp, 0, size);
336 	}
337 	return (error);
338 }
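
/*
 * Example invocation, as used by ffs_vgetf() below to read a full inode
 * block with no read-ahead and no check-hash function:
 *
 *	error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
 *	    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
 */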
339 
340 static int
341 ffs_mount(struct mount *mp)
342 {
343 	struct vnode *devvp, *odevvp;
344 	struct thread *td;
345 	struct ufsmount *ump = NULL;
346 	struct fs *fs;
347 	int error, flags;
348 	int error1 __diagused;
349 	uint64_t mntorflags, saved_mnt_flag;
350 	accmode_t accmode;
351 	struct nameidata ndp;
352 	char *fspec;
353 	bool mounted_softdep;
354 
355 	td = curthread;
356 	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
357 		return (EINVAL);
358 	if (uma_inode == NULL) {
359 		uma_inode = uma_zcreate("FFS inode",
360 		    sizeof(struct inode), NULL, NULL, NULL, NULL,
361 		    UMA_ALIGN_PTR, 0);
362 		uma_ufs1 = uma_zcreate("FFS1 dinode",
363 		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
364 		    UMA_ALIGN_PTR, 0);
365 		uma_ufs2 = uma_zcreate("FFS2 dinode",
366 		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
367 		    UMA_ALIGN_PTR, 0);
368 		VFS_SMR_ZONE_SET(uma_inode);
369 	}
370 
371 	vfs_deleteopt(mp->mnt_optnew, "groupquota");
372 	vfs_deleteopt(mp->mnt_optnew, "userquota");
373 
374 	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
375 	if (error)
376 		return (error);
377 
378 	mntorflags = 0;
379 	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
380 		mntorflags |= MNT_UNTRUSTED;
381 
382 	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
383 		mntorflags |= MNT_ACLS;
384 
385 	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
386 		mntorflags |= MNT_SNAPSHOT;
387 		/*
388 		 * Once we have set the MNT_SNAPSHOT flag, do not
389 		 * persist "snapshot" in the options list.
390 		 */
391 		vfs_deleteopt(mp->mnt_optnew, "snapshot");
392 		vfs_deleteopt(mp->mnt_opt, "snapshot");
393 	}
394 
395 	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
396 		if (mntorflags & MNT_ACLS) {
397 			vfs_mount_error(mp,
398 			    "\"acls\" and \"nfsv4acls\" options "
399 			    "are mutually exclusive");
400 			return (EINVAL);
401 		}
402 		mntorflags |= MNT_NFS4ACLS;
403 	}
404 
405 	MNT_ILOCK(mp);
406 	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
407 	mp->mnt_flag |= mntorflags;
408 	MNT_IUNLOCK(mp);
409 
410 	/*
411 	 * If this is a snapshot request, take the snapshot.
412 	 */
413 	if (mp->mnt_flag & MNT_SNAPSHOT) {
414 		if ((mp->mnt_flag & MNT_UPDATE) == 0)
415 			return (EINVAL);
416 		return (ffs_snapshot(mp, fspec));
417 	}
418 
419 	/*
420 	 * Must not call namei() while owning busy ref.
421 	 */
422 	if (mp->mnt_flag & MNT_UPDATE)
423 		vfs_unbusy(mp);
424 
425 	/*
426 	 * Not an update, or updating the name: look up the name
427 	 * and verify that it refers to a sensible disk device.
428 	 */
429 	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
430 	error = namei(&ndp);
431 	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
432 		/*
433 		 * Unmount does not start if MNT_UPDATE is set.  Mount
434 		 * update busies mp before setting MNT_UPDATE.  We
435 		 * must be able to retain our busy ref successfully,
436 		 * without sleep.
437 		 */
438 		error1 = vfs_busy(mp, MBF_NOWAIT);
439 		MPASS(error1 == 0);
440 	}
441 	if (error != 0)
442 		return (error);
443 	NDFREE_PNBUF(&ndp);
444 	if (!vn_isdisk_error(ndp.ni_vp, &error)) {
445 		vput(ndp.ni_vp);
446 		return (error);
447 	}
448 
449 	/*
450 	 * If mount by non-root, then verify that user has necessary
451 	 * permissions on the device.
452 	 */
453 	accmode = VREAD;
454 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
455 		accmode |= VWRITE;
456 	error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
457 	if (error)
458 		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
459 	if (error) {
460 		vput(ndp.ni_vp);
461 		return (error);
462 	}
463 
464 	/*
465 	 * New mount
466 	 *
467 	 * We need the name for the mount point (also used for
468 	 * "last mounted on") copied in. If an error occurs,
469 	 * the mount point is discarded by the upper level code.
470 	 * Note that vfs_mount_alloc() populates f_mntonname for us.
471 	 */
472 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
473 		if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
474 			vrele(ndp.ni_vp);
475 			return (error);
476 		}
477 	} else {
478 		/*
479 		 * When updating, check whether changing from read-only to
480 		 * read/write; if there is no device name, that's all we do.
481 		 */
482 		ump = VFSTOUFS(mp);
483 		fs = ump->um_fs;
484 		odevvp = ump->um_odevvp;
485 		devvp = ump->um_devvp;
486 
487 		/*
488 		 * If it is not the same vnode, or at least the same device,
489 		 * then it is not the correct one.
490 		 */
491 		if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
492 			error = EINVAL; /* needs translation */
493 		vput(ndp.ni_vp);
494 		if (error)
495 			return (error);
496 		if (fs->fs_ronly == 0 &&
497 		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
498 			/*
499 			 * Flush any dirty data and suspend filesystem.
500 			 */
501 			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
502 				return (error);
503 			error = vfs_write_suspend_umnt(mp);
504 			if (error != 0)
505 				return (error);
506 
507 			fs->fs_ronly = 1;
508 			if (MOUNTEDSOFTDEP(mp)) {
509 				MNT_ILOCK(mp);
510 				mp->mnt_flag &= ~MNT_SOFTDEP;
511 				MNT_IUNLOCK(mp);
512 				mounted_softdep = true;
513 			} else
514 				mounted_softdep = false;
515 
516 			/*
517 			 * Check for and optionally get rid of files open
518 			 * for writing.
519 			 */
520 			flags = WRITECLOSE;
521 			if (mp->mnt_flag & MNT_FORCE)
522 				flags |= FORCECLOSE;
523 			if (mounted_softdep) {
524 				error = softdep_flushfiles(mp, flags, td);
525 			} else {
526 				error = ffs_flushfiles(mp, flags, td);
527 			}
528 			if (error) {
529 				fs->fs_ronly = 0;
530 				if (mounted_softdep) {
531 					MNT_ILOCK(mp);
532 					mp->mnt_flag |= MNT_SOFTDEP;
533 					MNT_IUNLOCK(mp);
534 				}
535 				vfs_write_resume(mp, 0);
536 				return (error);
537 			}
538 
539 			if (fs->fs_pendingblocks != 0 ||
540 			    fs->fs_pendinginodes != 0) {
541 				printf("WARNING: %s: update error: blocks %jd "
542 				    "files %d\n", fs->fs_fsmnt,
543 				    (intmax_t)fs->fs_pendingblocks,
544 				    fs->fs_pendinginodes);
545 				fs->fs_pendingblocks = 0;
546 				fs->fs_pendinginodes = 0;
547 			}
548 			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
549 				fs->fs_clean = 1;
550 			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
551 				fs->fs_ronly = 0;
552 				fs->fs_clean = 0;
553 				if (mounted_softdep) {
554 					MNT_ILOCK(mp);
555 					mp->mnt_flag |= MNT_SOFTDEP;
556 					MNT_IUNLOCK(mp);
557 				}
558 				vfs_write_resume(mp, 0);
559 				return (error);
560 			}
561 			if (mounted_softdep)
562 				softdep_unmount(mp);
563 			g_topology_lock();
564 			/*
565 			 * Drop our write and exclusive access.
566 			 */
567 			g_access(ump->um_cp, 0, -1, -1);
568 			g_topology_unlock();
569 			MNT_ILOCK(mp);
570 			mp->mnt_flag |= MNT_RDONLY;
571 			MNT_IUNLOCK(mp);
572 			/*
573 			 * Allow the writers to note that the filesystem
574 			 * is read-only now.
575 			 */
576 			vfs_write_resume(mp, 0);
577 		}
578 		if ((mp->mnt_flag & MNT_RELOAD) &&
579 		    (error = ffs_reload(mp, 0)) != 0) {
580 			return (error);
581 		} else {
582 			/* ffs_reload replaces the superblock structure */
583 			fs = ump->um_fs;
584 		}
585 		if (fs->fs_ronly &&
586 		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
587 			/*
588 			 * If upgrade to read-write by non-root, then verify
589 			 * that user has necessary permissions on the device.
590 			 */
591 			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
592 			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
593 			    td->td_ucred, td);
594 			if (error)
595 				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
596 			VOP_UNLOCK(odevvp);
597 			if (error) {
598 				return (error);
599 			}
600 			fs->fs_flags &= ~FS_UNCLEAN;
601 			if (fs->fs_clean == 0) {
602 				fs->fs_flags |= FS_UNCLEAN;
603 				if ((mp->mnt_flag & MNT_FORCE) ||
604 				    ((fs->fs_flags &
605 				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
606 				     (fs->fs_flags & FS_DOSOFTDEP))) {
607 					printf("WARNING: %s was not properly "
608 					   "dismounted\n",
609 					   mp->mnt_stat.f_mntonname);
610 				} else {
611 					vfs_mount_error(mp,
612 					   "R/W mount of %s denied. %s.%s",
613 					   mp->mnt_stat.f_mntonname,
614 					   "Filesystem is not clean - run fsck",
615 					   (fs->fs_flags & FS_SUJ) == 0 ? "" :
616 					   " Forced mount will invalidate"
617 					   " journal contents");
618 					return (EPERM);
619 				}
620 			}
621 			g_topology_lock();
622 			/*
623 			 * Request exclusive write access.
624 			 */
625 			error = g_access(ump->um_cp, 0, 1, 1);
626 			g_topology_unlock();
627 			if (error)
628 				return (error);
629 			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
630 				return (error);
631 			error = vfs_write_suspend_umnt(mp);
632 			if (error != 0)
633 				return (error);
634 			fs->fs_ronly = 0;
635 			MNT_ILOCK(mp);
636 			saved_mnt_flag = MNT_RDONLY;
637 			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
638 			    MNT_ASYNC) != 0)
639 				saved_mnt_flag |= MNT_ASYNC;
640 			mp->mnt_flag &= ~saved_mnt_flag;
641 			MNT_IUNLOCK(mp);
642 			fs->fs_mtime = time_second;
643 			/* check to see if we need to start softdep */
644 			if ((fs->fs_flags & FS_DOSOFTDEP) &&
645 			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))) {
646 				fs->fs_ronly = 1;
647 				MNT_ILOCK(mp);
648 				mp->mnt_flag |= saved_mnt_flag;
649 				MNT_IUNLOCK(mp);
650 				vfs_write_resume(mp, 0);
651 				return (error);
652 			}
653 			fs->fs_clean = 0;
654 			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
655 				fs->fs_ronly = 1;
656 				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
657 					softdep_unmount(mp);
658 				MNT_ILOCK(mp);
659 				mp->mnt_flag |= saved_mnt_flag;
660 				MNT_IUNLOCK(mp);
661 				vfs_write_resume(mp, 0);
662 				return (error);
663 			}
664 			if (fs->fs_snapinum[0] != 0)
665 				ffs_snapshot_mount(mp);
666 			vfs_write_resume(mp, 0);
667 		}
668 		/*
669 		 * Soft updates is incompatible with "async", so if
670 		 * we are doing soft updates, stop the user from
671 		 * setting the async flag in an update.
672 		 * Softdep_mount() clears the flag in an initial
673 		 * mount or in a ro->rw remount.
674 		 */
675 		if (MOUNTEDSOFTDEP(mp)) {
676 			/* XXX: Reset too late ? */
677 			MNT_ILOCK(mp);
678 			mp->mnt_flag &= ~MNT_ASYNC;
679 			MNT_IUNLOCK(mp);
680 		}
681 		/*
682 		 * Keep MNT_ACLS flag if it is stored in superblock.
683 		 */
684 		if ((fs->fs_flags & FS_ACLS) != 0) {
685 			/* XXX: Set too late ? */
686 			MNT_ILOCK(mp);
687 			mp->mnt_flag |= MNT_ACLS;
688 			MNT_IUNLOCK(mp);
689 		}
690 
691 		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
692 			/* XXX: Set too late ? */
693 			MNT_ILOCK(mp);
694 			mp->mnt_flag |= MNT_NFS4ACLS;
695 			MNT_IUNLOCK(mp);
696 		}
697 
698 	}
699 
700 	MNT_ILOCK(mp);
701 	/*
702 	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
703 	 */
704 	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
705 		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
706 	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
707 		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
708 	MNT_IUNLOCK(mp);
709 
710 	vfs_mountedfrom(mp, fspec);
711 	return (0);
712 }
713 
714 /*
715  * Compatibility with old mount system call.
716  */
717 
718 static int
719 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
720 {
721 	struct ufs_args args;
722 	int error;
723 
724 	if (data == NULL)
725 		return (EINVAL);
726 	error = copyin(data, &args, sizeof args);
727 	if (error)
728 		return (error);
729 
730 	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
731 	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
732 	error = kernel_mount(ma, flags);
733 
734 	return (error);
735 }
736 
737 /*
738  * Reload all incore data for a filesystem (used after running fsck on
739  * the root filesystem and finding things to fix). If the 'force' flag
740  * is 0, the filesystem must be mounted read-only.
741  *
742  * Things to do to update the mount:
743  *	1) invalidate all cached meta-data.
744  *	2) re-read superblock from disk.
745  *	3) If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
746  *	   to allow secondary writers.
747  *	4) invalidate all cached file data.
748  *	5) re-read inode data for all active vnodes.
749  */
750 int
751 ffs_reload(struct mount *mp, int flags)
752 {
753 	struct vnode *vp, *mvp, *devvp;
754 	struct inode *ip;
755 	struct buf *bp;
756 	struct fs *fs, *newfs;
757 	struct ufsmount *ump;
758 	int error;
759 
760 	ump = VFSTOUFS(mp);
761 
762 	MNT_ILOCK(mp);
763 	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
764 		MNT_IUNLOCK(mp);
765 		return (EINVAL);
766 	}
767 	MNT_IUNLOCK(mp);
768 
769 	/*
770 	 * Step 1: invalidate all cached meta-data.
771 	 */
772 	devvp = VFSTOUFS(mp)->um_devvp;
773 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
774 	if (vinvalbuf(devvp, 0, 0, 0) != 0)
775 		panic("ffs_reload: dirty1");
776 	VOP_UNLOCK(devvp);
777 
778 	/*
779 	 * Step 2: re-read superblock from disk.
780 	 */
781 	if ((error = ffs_sbget(devvp, &newfs, UFS_STDSB, 0, M_UFSMNT,
782 	    ffs_use_bread)) != 0)
783 		return (error);
784 	/*
785 	 * Replace our superblock with the new superblock. Preserve
786 	 * our read-only status.
787 	 */
788 	fs = VFSTOUFS(mp)->um_fs;
789 	newfs->fs_ronly = fs->fs_ronly;
790 	free(fs->fs_csp, M_UFSMNT);
791 	free(fs->fs_si, M_UFSMNT);
792 	free(fs, M_UFSMNT);
793 	fs = VFSTOUFS(mp)->um_fs = newfs;
794 	ump->um_bsize = fs->fs_bsize;
795 	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
796 	UFS_LOCK(ump);
797 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
798 		printf("WARNING: %s: reload pending error: blocks %jd "
799 		    "files %d\n", mp->mnt_stat.f_mntonname,
800 		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
801 		fs->fs_pendingblocks = 0;
802 		fs->fs_pendinginodes = 0;
803 	}
804 	UFS_UNLOCK(ump);
805 	/*
806 	 * Step 3: If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
807 	 * to allow secondary writers.
808 	 */
809 	if ((flags & FFSR_UNSUSPEND) != 0) {
810 		MNT_ILOCK(mp);
811 		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
812 		wakeup(&mp->mnt_flag);
813 		MNT_IUNLOCK(mp);
814 	}
815 
816 loop:
817 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
818 		/*
819 		 * Skip syncer vnode.
820 		 */
821 		if (vp->v_type == VNON) {
822 			VI_UNLOCK(vp);
823 			continue;
824 		}
825 		/*
826 		 * Step 4: invalidate all cached file data.
827 		 */
828 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
829 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
830 			goto loop;
831 		}
832 		if (vinvalbuf(vp, 0, 0, 0))
833 			panic("ffs_reload: dirty2");
834 		/*
835 		 * Step 5: re-read inode data for all active vnodes.
836 		 */
837 		ip = VTOI(vp);
838 		error =
839 		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
840 		    (int)fs->fs_bsize, NOCRED, &bp);
841 		if (error) {
842 			vput(vp);
843 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
844 			return (error);
845 		}
846 		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
847 			brelse(bp);
848 			vput(vp);
849 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
850 			return (error);
851 		}
852 		ip->i_effnlink = ip->i_nlink;
853 		brelse(bp);
854 		vput(vp);
855 	}
856 	return (0);
857 }
858 
859 /*
860  * Common code for mount and mountroot
861  */
862 static int
863 ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
864 {
865 	struct ufsmount *ump;
866 	struct fs *fs;
867 	struct cdev *dev;
868 	int error, i, len, ronly;
869 	struct ucred *cred;
870 	struct g_consumer *cp;
871 	struct mount *nmp;
872 	struct vnode *devvp;
873 	int candelete, canspeedup;
874 
875 	fs = NULL;
876 	ump = NULL;
877 	cred = td ? td->td_ucred : NOCRED;
878 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
879 
880 	devvp = mntfs_allocvp(mp, odevvp);
881 	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
882 	dev = devvp->v_rdev;
883 	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
884 	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
885 	    (uintptr_t)mp) == 0) {
886 		mntfs_freevp(devvp);
887 		return (EBUSY);
888 	}
889 	g_topology_lock();
890 	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
891 	g_topology_unlock();
892 	if (error != 0) {
893 		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
894 		mntfs_freevp(devvp);
895 		return (error);
896 	}
897 	dev_ref(dev);
898 	devvp->v_bufobj.bo_ops = &ffs_ops;
899 	BO_LOCK(&odevvp->v_bufobj);
900 	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
901 	BO_UNLOCK(&odevvp->v_bufobj);
902 	VOP_UNLOCK(devvp);
903 	if (dev->si_iosize_max != 0)
904 		mp->mnt_iosize_max = dev->si_iosize_max;
905 	if (mp->mnt_iosize_max > maxphys)
906 		mp->mnt_iosize_max = maxphys;
907 	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
908 		error = EINVAL;
909 		vfs_mount_error(mp,
910 		    "Invalid sectorsize %d for superblock size %d",
911 		    cp->provider->sectorsize, SBLOCKSIZE);
912 		goto out;
913 	}
914 	/* fetch the superblock and summary information */
915 	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
916 		error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
917 	else
918 		error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
919 		    ffs_use_bread);
920 	if (error != 0)
921 		goto out;
922 	fs->fs_flags &= ~FS_UNCLEAN;
923 	if (fs->fs_clean == 0) {
924 		fs->fs_flags |= FS_UNCLEAN;
925 		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
926 		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
927 		     (fs->fs_flags & FS_DOSOFTDEP))) {
928 			printf("WARNING: %s was not properly dismounted\n",
929 			    mp->mnt_stat.f_mntonname);
930 		} else {
931 			vfs_mount_error(mp, "R/W mount on %s denied. "
932 			    "Filesystem is not clean - run fsck.%s",
933 			    mp->mnt_stat.f_mntonname,
934 			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
935 			    " Forced mount will invalidate journal contents");
936 			error = EPERM;
937 			goto out;
938 		}
939 		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
940 		    (mp->mnt_flag & MNT_FORCE)) {
941 			printf("WARNING: %s: lost blocks %jd files %d\n",
942 			    mp->mnt_stat.f_mntonname,
943 			    (intmax_t)fs->fs_pendingblocks,
944 			    fs->fs_pendinginodes);
945 			fs->fs_pendingblocks = 0;
946 			fs->fs_pendinginodes = 0;
947 		}
948 	}
949 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
950 		printf("WARNING: %s: mount pending error: blocks %jd "
951 		    "files %d\n", mp->mnt_stat.f_mntonname,
952 		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
953 		fs->fs_pendingblocks = 0;
954 		fs->fs_pendinginodes = 0;
955 	}
956 	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
957 #ifdef UFS_GJOURNAL
958 		/*
959 		 * Get journal provider name.
960 		 */
961 		len = 1024;
962 		mp->mnt_gjprovider = malloc((uint64_t)len, M_UFSMNT, M_WAITOK);
963 		if (g_io_getattr("GJOURNAL::provider", cp, &len,
964 		    mp->mnt_gjprovider) == 0) {
965 			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
966 			    M_UFSMNT, M_WAITOK);
967 			MNT_ILOCK(mp);
968 			mp->mnt_flag |= MNT_GJOURNAL;
969 			MNT_IUNLOCK(mp);
970 		} else {
971 			if ((mp->mnt_flag & MNT_RDONLY) == 0)
972 				printf("WARNING: %s: GJOURNAL flag on fs "
973 				    "but no gjournal provider below\n",
974 				    mp->mnt_stat.f_mntonname);
975 			free(mp->mnt_gjprovider, M_UFSMNT);
976 			mp->mnt_gjprovider = NULL;
977 		}
978 #else
979 		printf("WARNING: %s: GJOURNAL flag on fs but no "
980 		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
981 #endif
982 	} else {
983 		mp->mnt_gjprovider = NULL;
984 	}
985 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
986 	ump->um_cp = cp;
987 	ump->um_bo = &devvp->v_bufobj;
988 	ump->um_fs = fs;
989 	if (fs->fs_magic == FS_UFS1_MAGIC) {
990 		ump->um_fstype = UFS1;
991 		ump->um_balloc = ffs_balloc_ufs1;
992 	} else {
993 		ump->um_fstype = UFS2;
994 		ump->um_balloc = ffs_balloc_ufs2;
995 	}
996 	ump->um_blkatoff = ffs_blkatoff;
997 	ump->um_truncate = ffs_truncate;
998 	ump->um_update = ffs_update;
999 	ump->um_valloc = ffs_valloc;
1000 	ump->um_vfree = ffs_vfree;
1001 	ump->um_ifree = ffs_ifree;
1002 	ump->um_rdonly = ffs_rdonly;
1003 	ump->um_snapgone = ffs_snapgone;
1004 	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
1005 		ump->um_check_blkno = ffs_check_blkno;
1006 	else
1007 		ump->um_check_blkno = NULL;
1008 	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
1009 	sx_init(&ump->um_checkpath_lock, "uchpth");
1010 	fs->fs_ronly = ronly;
1011 	fs->fs_active = NULL;
1012 	mp->mnt_data = ump;
1013 	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
1014 	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
1015 	nmp = NULL;
1016 	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
1017 	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
1018 		if (nmp)
1019 			vfs_rel(nmp);
1020 		vfs_getnewfsid(mp);
1021 	}
1022 	ump->um_bsize = fs->fs_bsize;
1023 	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1024 	MNT_ILOCK(mp);
1025 	mp->mnt_flag |= MNT_LOCAL;
1026 	MNT_IUNLOCK(mp);
1027 	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
1028 #ifdef MAC
1029 		MNT_ILOCK(mp);
1030 		mp->mnt_flag |= MNT_MULTILABEL;
1031 		MNT_IUNLOCK(mp);
1032 #else
1033 		printf("WARNING: %s: multilabel flag on fs but "
1034 		    "no MAC support\n", mp->mnt_stat.f_mntonname);
1035 #endif
1036 	}
1037 	if ((fs->fs_flags & FS_ACLS) != 0) {
1038 #ifdef UFS_ACL
1039 		MNT_ILOCK(mp);
1040 
1041 		if (mp->mnt_flag & MNT_NFS4ACLS)
1042 			printf("WARNING: %s: ACLs flag on fs conflicts with "
1043 			    "\"nfsv4acls\" mount option; option ignored\n",
1044 			    mp->mnt_stat.f_mntonname);
1045 		mp->mnt_flag &= ~MNT_NFS4ACLS;
1046 		mp->mnt_flag |= MNT_ACLS;
1047 
1048 		MNT_IUNLOCK(mp);
1049 #else
1050 		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
1051 		    mp->mnt_stat.f_mntonname);
1052 #endif
1053 	}
1054 	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
1055 #ifdef UFS_ACL
1056 		MNT_ILOCK(mp);
1057 
1058 		if (mp->mnt_flag & MNT_ACLS)
1059 			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
1060 			    "with \"acls\" mount option; option ignored\n",
1061 			    mp->mnt_stat.f_mntonname);
1062 		mp->mnt_flag &= ~MNT_ACLS;
1063 		mp->mnt_flag |= MNT_NFS4ACLS;
1064 
1065 		MNT_IUNLOCK(mp);
1066 #else
1067 		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
1068 		    "ACLs support\n", mp->mnt_stat.f_mntonname);
1069 #endif
1070 	}
1071 	if ((fs->fs_flags & FS_TRIM) != 0) {
1072 		len = sizeof(int);
1073 		if (g_io_getattr("GEOM::candelete", cp, &len,
1074 		    &candelete) == 0) {
1075 			if (candelete)
1076 				ump->um_flags |= UM_CANDELETE;
1077 			else
1078 				printf("WARNING: %s: TRIM flag on fs but disk "
1079 				    "does not support TRIM\n",
1080 				    mp->mnt_stat.f_mntonname);
1081 		} else {
1082 			printf("WARNING: %s: TRIM flag on fs but disk does "
1083 			    "not confirm that it supports TRIM\n",
1084 			    mp->mnt_stat.f_mntonname);
1085 		}
1086 		if (((ump->um_flags) & UM_CANDELETE) != 0) {
1087 			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
1088 			    taskqueue_thread_enqueue, &ump->um_trim_tq);
1089 			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
1090 			    "%s trim", mp->mnt_stat.f_mntonname);
1091 			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
1092 			    &ump->um_trimlisthashsize);
1093 		}
1094 	}
1095 
1096 	len = sizeof(int);
1097 	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
1098 		if (canspeedup)
1099 			ump->um_flags |= UM_CANSPEEDUP;
1100 	}
1101 
1102 	ump->um_mountp = mp;
1103 	ump->um_dev = dev;
1104 	ump->um_devvp = devvp;
1105 	ump->um_odevvp = odevvp;
1106 	ump->um_nindir = fs->fs_nindir;
1107 	ump->um_bptrtodb = fs->fs_fsbtodb;
1108 	ump->um_seqinc = fs->fs_frag;
1109 	for (i = 0; i < MAXQUOTAS; i++)
1110 		ump->um_quotas[i] = NULLVP;
1111 #ifdef UFS_EXTATTR
1112 	ufs_extattr_uepm_init(&ump->um_extattr);
1113 #endif
1114 	/*
1115 	 * Set FS local "last mounted on" information (NULL pad)
1116 	 */
1117 	bzero(fs->fs_fsmnt, MAXMNTLEN);
1118 	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
1119 	mp->mnt_stat.f_iosize = fs->fs_bsize;
1120 
1121 	if (mp->mnt_flag & MNT_ROOTFS) {
1122 		/*
1123 		 * Root mount; update timestamp in mount structure.
1124 		 * This will be used by the common root mount code
1125 		 * to update the system clock.
1126 		 */
1127 		mp->mnt_time = fs->fs_time;
1128 	}
1129 
1130 	if (ronly == 0) {
1131 		fs->fs_mtime = time_second;
1132 		if ((fs->fs_flags & FS_DOSOFTDEP) &&
1133 		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
1134 			ffs_flushfiles(mp, FORCECLOSE, td);
1135 			goto out;
1136 		}
1137 		if (fs->fs_snapinum[0] != 0)
1138 			ffs_snapshot_mount(mp);
1139 		fs->fs_fmod = 1;
1140 		fs->fs_clean = 0;
1141 		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
1142 	}
1143 	/*
1144 	 * Initialize filesystem state information in mount struct.
1145 	 */
1146 	MNT_ILOCK(mp);
1147 	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
1148 	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
1149 	MNT_IUNLOCK(mp);
1150 #ifdef UFS_EXTATTR
1151 #ifdef UFS_EXTATTR_AUTOSTART
1152 	/*
1153 	 *
1154 	 * Auto-starting does the following:
1155 	 *	- check for /.attribute in the fs, and extattr_start if so
1156 	 *	- for each file in .attribute, enable that file with
1157 	 * 	  an attribute of the same name.
1158 	 * Not clear how to report errors -- probably eat them.
1159 	 * This would all happen while the filesystem was busy/not
1160 	 * available, so would effectively be "atomic".
1161 	 */
1162 	(void) ufs_extattr_autostart(mp, td);
1163 #endif /* !UFS_EXTATTR_AUTOSTART */
1164 #endif /* !UFS_EXTATTR */
1165 	return (0);
1166 out:
1167 	if (fs != NULL) {
1168 		free(fs->fs_csp, M_UFSMNT);
1169 		free(fs->fs_si, M_UFSMNT);
1170 		free(fs, M_UFSMNT);
1171 	}
1172 	if (cp != NULL) {
1173 		g_topology_lock();
1174 		g_vfs_close(cp);
1175 		g_topology_unlock();
1176 	}
1177 	if (ump != NULL) {
1178 		mtx_destroy(UFS_MTX(ump));
1179 		sx_destroy(&ump->um_checkpath_lock);
1180 		if (mp->mnt_gjprovider != NULL) {
1181 			free(mp->mnt_gjprovider, M_UFSMNT);
1182 			mp->mnt_gjprovider = NULL;
1183 		}
1184 		MPASS(ump->um_softdep == NULL);
1185 		free(ump, M_UFSMNT);
1186 		mp->mnt_data = NULL;
1187 	}
1188 	BO_LOCK(&odevvp->v_bufobj);
1189 	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
1190 	BO_UNLOCK(&odevvp->v_bufobj);
1191 	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
1192 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1193 	mntfs_freevp(devvp);
1194 	dev_rel(dev);
1195 	return (error);
1196 }
1197 
1198 /*
1199  * A read function for use by filesystem-layer routines.
1200  */
1201 static int
1202 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
1203 {
1204 	struct buf *bp;
1205 	int error;
1206 
1207 	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
1208 	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
1209 	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
1210 	    &bp)) != 0)
1211 		return (error);
1212 	bcopy(bp->b_data, *bufp, size);
1213 	bp->b_flags |= B_INVAL | B_NOCACHE;
1214 	brelse(bp);
1215 	return (0);
1216 }
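
/*
 * This function matches the read callback expected by ffs_sbget() and
 * ffs_sbsearch(): the opaque "devfd" argument is the device vnode that
 * the callers above pass through unchanged.
 */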
1217 
1218 /*
1219  * unmount system call
1220  */
1221 static int
1222 ffs_unmount(struct mount *mp, int mntflags)
1223 {
1224 	struct thread *td;
1225 	struct ufsmount *ump = VFSTOUFS(mp);
1226 	struct fs *fs;
1227 	int error, flags, susp;
1228 #ifdef UFS_EXTATTR
1229 	int e_restart;
1230 #endif
1231 
1232 	flags = 0;
1233 	td = curthread;
1234 	fs = ump->um_fs;
1235 	if (mntflags & MNT_FORCE)
1236 		flags |= FORCECLOSE;
1237 	susp = fs->fs_ronly == 0;
1238 #ifdef UFS_EXTATTR
1239 	if ((error = ufs_extattr_stop(mp, td))) {
1240 		if (error != EOPNOTSUPP)
1241 			printf("WARNING: unmount %s: ufs_extattr_stop "
1242 			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
1243 			    error);
1244 		e_restart = 0;
1245 	} else {
1246 		ufs_extattr_uepm_destroy(&ump->um_extattr);
1247 		e_restart = 1;
1248 	}
1249 #endif
1250 	if (susp) {
1251 		error = vfs_write_suspend_umnt(mp);
1252 		if (error != 0)
1253 			goto fail1;
1254 	}
1255 	if (MOUNTEDSOFTDEP(mp))
1256 		error = softdep_flushfiles(mp, flags, td);
1257 	else
1258 		error = ffs_flushfiles(mp, flags, td);
1259 	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
1260 		goto fail;
1261 
1262 	UFS_LOCK(ump);
1263 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1264 		printf("WARNING: unmount %s: pending error: blocks %jd "
1265 		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
1266 		    fs->fs_pendinginodes);
1267 		fs->fs_pendingblocks = 0;
1268 		fs->fs_pendinginodes = 0;
1269 	}
1270 	UFS_UNLOCK(ump);
1271 	if (MOUNTEDSOFTDEP(mp))
1272 		softdep_unmount(mp);
1273 	MPASS(ump->um_softdep == NULL);
1274 	if (fs->fs_ronly == 0) {
1275 		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
1276 		error = ffs_sbupdate(ump, MNT_WAIT, 0);
1277 		if (ffs_fsfail_cleanup(ump, error))
1278 			error = 0;
1279 		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
1280 			fs->fs_clean = 0;
1281 			goto fail;
1282 		}
1283 	}
1284 	if (susp)
1285 		vfs_write_resume(mp, VR_START_WRITE);
1286 	if (ump->um_trim_tq != NULL) {
1287 		MPASS(ump->um_trim_inflight == 0);
1288 		taskqueue_free(ump->um_trim_tq);
1289 		free(ump->um_trimhash, M_TRIM);
1290 	}
1291 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1292 	g_topology_lock();
1293 	g_vfs_close(ump->um_cp);
1294 	g_topology_unlock();
1295 	BO_LOCK(&ump->um_odevvp->v_bufobj);
1296 	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
1297 	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
1298 	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
1299 	mntfs_freevp(ump->um_devvp);
1300 	vrele(ump->um_odevvp);
1301 	dev_rel(ump->um_dev);
1302 	mtx_destroy(UFS_MTX(ump));
1303 	sx_destroy(&ump->um_checkpath_lock);
1304 	if (mp->mnt_gjprovider != NULL) {
1305 		free(mp->mnt_gjprovider, M_UFSMNT);
1306 		mp->mnt_gjprovider = NULL;
1307 	}
1308 	free(fs->fs_csp, M_UFSMNT);
1309 	free(fs->fs_si, M_UFSMNT);
1310 	free(fs, M_UFSMNT);
1311 	free(ump, M_UFSMNT);
1312 	mp->mnt_data = NULL;
1313 	if (td->td_su == mp) {
1314 		td->td_su = NULL;
1315 		vfs_rel(mp);
1316 	}
1317 	return (error);
1318 
1319 fail:
1320 	if (susp)
1321 		vfs_write_resume(mp, VR_START_WRITE);
1322 fail1:
1323 #ifdef UFS_EXTATTR
1324 	if (e_restart) {
1325 		ufs_extattr_uepm_init(&ump->um_extattr);
1326 #ifdef UFS_EXTATTR_AUTOSTART
1327 		(void) ufs_extattr_autostart(mp, td);
1328 #endif
1329 	}
1330 #endif
1331 
1332 	return (error);
1333 }
1334 
1335 /*
1336  * Flush out all the files in a filesystem.
1337  */
1338 int
1339 ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
1340 {
1341 	struct ufsmount *ump;
1342 	int qerror, error;
1343 
1344 	ump = VFSTOUFS(mp);
1345 	qerror = 0;
1346 #ifdef QUOTA
1347 	if (mp->mnt_flag & MNT_QUOTA) {
1348 		int i;
1349 		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
1350 		if (error)
1351 			return (error);
1352 		for (i = 0; i < MAXQUOTAS; i++) {
1353 			error = quotaoff(td, mp, i);
1354 			if (error != 0) {
1355 				if ((flags & EARLYFLUSH) == 0)
1356 					return (error);
1357 				else
1358 					qerror = error;
1359 			}
1360 		}
1361 
1362 		/*
1363 		 * Here we fall through to vflush again to ensure that
1364 		 * we have gotten rid of all the system vnodes, unless
1365 		 * quotas must not be closed.
1366 		 */
1367 	}
1368 #endif
1369 	/* devvp is not locked there */
1370 	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
1371 		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
1372 			return (error);
1373 		ffs_snapshot_unmount(mp);
1374 		flags |= FORCECLOSE;
1375 		/*
1376 		 * Here we fall through to vflush again to ensure
1377 		 * that we have gotten rid of all the system vnodes.
1378 		 */
1379 	}
1380 
1381 	/*
1382 	 * Do not close system files if quotas were not closed, to be
1383 	 * able to sync the remaining dquots.  The freeblks softupdate
1384 	 * workitems might hold a reference on a dquot, preventing
1385 	 * quotaoff() from completing.  Next round of
1386 	 * softdep_flushworklist() iteration should process the
1387 	 * blockers, allowing the next run of quotaoff() to finally
1388 	 * flush held dquots.
1389 	 *
1390 	 * Otherwise, flush all the files.
1391 	 */
1392 	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
1393 		return (error);
1394 
1395 	/*
1396 	 * If this is a forcible unmount and there were any files that
1397 	 * were unlinked but still open, then vflush() will have
1398 	 * truncated and freed those files, which might have started
1399 	 * some trim work.  Wait here for any trims to complete
1400 	 * and process the blkfrees which follow the trims.
1401 	 * This may create more dirty devvp buffers and softdep deps.
1402 	 */
1403 	if (ump->um_trim_tq != NULL) {
1404 		while (ump->um_trim_inflight != 0)
1405 			pause("ufsutr", hz);
1406 		taskqueue_drain_all(ump->um_trim_tq);
1407 	}
1408 
1409 	/*
1410 	 * Flush filesystem metadata.
1411 	 */
1412 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1413 	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
1414 	VOP_UNLOCK(ump->um_devvp);
1415 	return (error);
1416 }
1417 
1418 /*
1419  * Get filesystem statistics.
1420  */
1421 static int
1422 ffs_statfs(struct mount *mp, struct statfs *sbp)
1423 {
1424 	struct ufsmount *ump;
1425 	struct fs *fs;
1426 
1427 	ump = VFSTOUFS(mp);
1428 	fs = ump->um_fs;
1429 	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
1430 		panic("ffs_statfs");
1431 	sbp->f_version = STATFS_VERSION;
1432 	sbp->f_bsize = fs->fs_fsize;
1433 	sbp->f_iosize = fs->fs_bsize;
1434 	sbp->f_blocks = fs->fs_dsize;
1435 	UFS_LOCK(ump);
1436 	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
1437 	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1438 	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
1439 	    dbtofsb(fs, fs->fs_pendingblocks);
1440 	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1441 	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1442 	UFS_UNLOCK(ump);
1443 	sbp->f_namemax = UFS_MAXNAMLEN;
1444 	return (0);
1445 }
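
/*
 * Units sketch: f_bsize is the fragment size, so free space is counted
 * in fragments.  With a hypothetical fs_frag of 8, cs_nbfree == 100
 * full blocks and cs_nffree == 50 loose fragments give
 * f_bfree == 100 * 8 + 50 == 850 (plus any pending blocks).
 */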
1446 
1447 static bool
1448 sync_doupdate(struct inode *ip)
1449 {
1450 
1451 	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
1452 	    IN_UPDATE)) != 0);
1453 }
1454 
1455 static int
1456 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
1457 {
1458 	struct inode *ip;
1459 
1460 	/*
1461 	 * Flags are safe to access because ->v_data invalidation
1462 	 * is held off by listmtx.
1463 	 */
1464 	if (vp->v_type == VNON)
1465 		return (false);
1466 	ip = VTOI(vp);
1467 	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
1468 		return (false);
1469 	return (true);
1470 }
1471 
1472 /*
1473  * For a lazy sync, we only care about access times, quotas and the
1474  * superblock.  Other filesystem changes are already converted to
1475  * cylinder group block or inode block updates and are written to
1476  * disk by the syncer.
1477  */
1478 static int
1479 ffs_sync_lazy(struct mount *mp)
1480 {
1481 	struct vnode *mvp, *vp;
1482 	struct inode *ip;
1483 	int allerror, error;
1484 
1485 	allerror = 0;
1486 	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
1487 #ifdef QUOTA
1488 		qsync(mp);
1489 #endif
1490 		goto sbupdate;
1491 	}
1492 	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
1493 		if (vp->v_type == VNON) {
1494 			VI_UNLOCK(vp);
1495 			continue;
1496 		}
1497 		ip = VTOI(vp);
1498 
1499 		/*
1500 		 * The IN_ACCESS flag is converted to IN_MODIFIED by
1501 		 * ufs_close() and ufs_getattr() by the calls to
1502 		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
1503 		 * Also test all the other timestamp flags, to pick up
1504 		 * any other cases that could be missed.
1505 		 */
1506 		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
1507 			VI_UNLOCK(vp);
1508 			continue;
1509 		}
1510 		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
1511 			continue;
1512 #ifdef QUOTA
1513 		qsyncvp(vp);
1514 #endif
1515 		if (sync_doupdate(ip))
1516 			error = ffs_update(vp, 0);
1517 		if (error != 0)
1518 			allerror = error;
1519 		vput(vp);
1520 	}
1521 sbupdate:
1522 	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
1523 	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
1524 		allerror = error;
1525 	return (allerror);
1526 }
1527 
1528 /*
1529  * Go through the disk queues to initiate sandbagged IO;
1530  * go through the inodes to write those that have been modified;
1531  * initiate the writing of the super block if it has been modified.
1532  *
1533  * Note: we are always called with the filesystem marked busy using
1534  * vfs_busy().
1535  */
1536 static int
1537 ffs_sync(struct mount *mp, int waitfor)
1538 {
1539 	struct vnode *mvp, *vp, *devvp;
1540 	struct thread *td;
1541 	struct inode *ip;
1542 	struct ufsmount *ump = VFSTOUFS(mp);
1543 	struct fs *fs;
1544 	int error, count, lockreq, allerror = 0;
1545 	int suspend;
1546 	int suspended;
1547 	int secondary_writes;
1548 	int secondary_accwrites;
1549 	int softdep_deps;
1550 	int softdep_accdeps;
1551 	struct bufobj *bo;
1552 
1553 	suspend = 0;
1554 	suspended = 0;
1555 	td = curthread;
1556 	fs = ump->um_fs;
1557 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
1558 		panic("%s: ffs_sync: modification on read-only filesystem",
1559 		    fs->fs_fsmnt);
1560 	if (waitfor == MNT_LAZY) {
1561 		if (!rebooting)
1562 			return (ffs_sync_lazy(mp));
1563 		waitfor = MNT_NOWAIT;
1564 	}
1565 
1566 	/*
1567 	 * Write back each (modified) inode.
1568 	 */
1569 	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
1570 	if (waitfor == MNT_SUSPEND) {
1571 		suspend = 1;
1572 		waitfor = MNT_WAIT;
1573 	}
1574 	if (waitfor == MNT_WAIT)
1575 		lockreq = LK_EXCLUSIVE;
1576 	lockreq |= LK_INTERLOCK;
1577 loop:
1578 	/* Grab snapshot of secondary write counts */
1579 	MNT_ILOCK(mp);
1580 	secondary_writes = mp->mnt_secondary_writes;
1581 	secondary_accwrites = mp->mnt_secondary_accwrites;
1582 	MNT_IUNLOCK(mp);
1583 
1584 	/* Grab snapshot of softdep dependency counts */
1585 	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
1586 
1587 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1588 		/*
1589 		 * Depend on the vnode interlock to keep things stable enough
1590 		 * for a quick test.  Since there might be hundreds of
1591 		 * thousands of vnodes, we cannot afford even a subroutine
1592 		 * call unless there's a good chance that we have work to do.
1593 		 */
1594 		if (vp->v_type == VNON) {
1595 			VI_UNLOCK(vp);
1596 			continue;
1597 		}
1598 		ip = VTOI(vp);
1599 		if ((ip->i_flag &
1600 		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1601 		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1602 			VI_UNLOCK(vp);
1603 			continue;
1604 		}
1605 		if ((error = vget(vp, lockreq)) != 0) {
1606 			if (error == ENOENT) {
1607 				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1608 				goto loop;
1609 			}
1610 			continue;
1611 		}
1612 #ifdef QUOTA
1613 		qsyncvp(vp);
1614 #endif
1615 		for (;;) {
1616 			error = ffs_syncvnode(vp, waitfor, 0);
1617 			if (error == ERELOOKUP)
1618 				continue;
1619 			if (error != 0)
1620 				allerror = error;
1621 			break;
1622 		}
1623 		vput(vp);
1624 	}
1625 	/*
1626 	 * Force stale filesystem control information to be flushed.
1627 	 */
1628 	if (waitfor == MNT_WAIT || rebooting) {
1629 		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1630 			allerror = error;
1631 		if (ffs_fsfail_cleanup(ump, allerror))
1632 			allerror = 0;
1633 		/* Flushed work items may create new vnodes to clean */
1634 		if (allerror == 0 && count)
1635 			goto loop;
1636 	}
1637 
1638 	devvp = ump->um_devvp;
1639 	bo = &devvp->v_bufobj;
1640 	BO_LOCK(bo);
1641 	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1642 		BO_UNLOCK(bo);
1643 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1644 		error = VOP_FSYNC(devvp, waitfor, td);
1645 		VOP_UNLOCK(devvp);
1646 		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
1647 			error = ffs_sbupdate(ump, waitfor, 0);
1648 		if (error != 0)
1649 			allerror = error;
1650 		if (ffs_fsfail_cleanup(ump, allerror))
1651 			allerror = 0;
1652 		if (allerror == 0 && waitfor == MNT_WAIT)
1653 			goto loop;
1654 	} else if (suspend != 0) {
1655 		if (softdep_check_suspend(mp,
1656 					  devvp,
1657 					  softdep_deps,
1658 					  softdep_accdeps,
1659 					  secondary_writes,
1660 					  secondary_accwrites) != 0) {
1661 			MNT_IUNLOCK(mp);
1662 			goto loop;	/* More work needed */
1663 		}
1664 		mtx_assert(MNT_MTX(mp), MA_OWNED);
1665 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1666 		MNT_IUNLOCK(mp);
1667 		suspended = 1;
1668 	} else
1669 		BO_UNLOCK(bo);
1670 	/*
1671 	 * Write back modified superblock.
1672 	 */
1673 	if (fs->fs_fmod != 0 &&
1674 	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1675 		allerror = error;
1676 	if (ffs_fsfail_cleanup(ump, allerror))
1677 		allerror = 0;
1678 	return (allerror);
1679 }
1680 
1681 int
1682 ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
1683 {
1684 	return (ffs_vgetf(mp, ino, flags, vpp, 0));
1685 }
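
/*
 * Typical use (illustrative): obtain a locked vnode for inode "ino"
 * on mount "mp":
 *
 *	if ((error = ffs_vget(mp, ino, LK_EXCLUSIVE, &vp)) != 0)
 *		return (error);
 */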
1686 
1687 int
1688 ffs_vgetf(struct mount *mp,
1689 	ino_t ino,
1690 	int flags,
1691 	struct vnode **vpp,
1692 	int ffs_flags)
1693 {
1694 	struct fs *fs;
1695 	struct inode *ip;
1696 	struct ufsmount *ump;
1697 	struct buf *bp;
1698 	struct vnode *vp;
1699 	daddr_t dbn;
1700 	int error;
1701 
1702 	MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1703 	    (flags & LK_EXCLUSIVE) != 0);
1704 
1705 	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1706 	if (error != 0)
1707 		return (error);
1708 	if (*vpp != NULL) {
1709 		if ((ffs_flags & FFSV_REPLACE) == 0 &&
1710 		    ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1711 		    !VN_IS_DOOMED(*vpp)))
1712 			return (0);
1713 		vgone(*vpp);
1714 		vput(*vpp);
1715 	}
1716 
1717 	/*
1718 	 * We must promote to an exclusive lock for vnode creation.  This
1719 	 * can happen if lookup is passed LOCKSHARED.
1720 	 */
1721 	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1722 		flags &= ~LK_TYPE_MASK;
1723 		flags |= LK_EXCLUSIVE;
1724 	}
1725 
1726 	/*
1727 	 * We do not lock vnode creation as it is believed to be too
1728 	 * expensive for such a rare case as the simultaneous creation of a
1729 	 * vnode for the same ino by different processes.  We just allow them
1730 	 * to race and check later to decide who wins.  Let the race begin!
1731 	 */
1732 
1733 	ump = VFSTOUFS(mp);
1734 	fs = ump->um_fs;
1735 	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);
1736 
1737 	/* Allocate a new vnode/inode. */
1738 	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1739 	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1740 	if (error) {
1741 		*vpp = NULL;
1742 		uma_zfree_smr(uma_inode, ip);
1743 		return (error);
1744 	}
1745 	/*
1746 	 * FFS supports recursive locking.
1747 	 */
1748 	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1749 	VN_LOCK_AREC(vp);
1750 	vp->v_data = ip;
1751 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
1752 	ip->i_vnode = vp;
1753 	ip->i_ump = ump;
1754 	ip->i_number = ino;
1755 	ip->i_ea_refs = 0;
1756 	ip->i_nextclustercg = -1;
1757 	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1758 	ip->i_mode = 0; /* ensure error cases below throw away vnode */
1759 	cluster_init_vn(&ip->i_clusterw);
1760 #ifdef DIAGNOSTIC
1761 	ufs_init_trackers(ip);
1762 #endif
1763 #ifdef QUOTA
1764 	{
1765 		int i;
1766 		for (i = 0; i < MAXQUOTAS; i++)
1767 			ip->i_dquot[i] = NODQUOT;
1768 	}
1769 #endif
1770 
1771 	if (ffs_flags & FFSV_FORCEINSMQ)
1772 		vp->v_vflag |= VV_FORCEINSMQ;
1773 	error = insmntque(vp, mp);
1774 	if (error != 0) {
1775 		uma_zfree_smr(uma_inode, ip);
1776 		*vpp = NULL;
1777 		return (error);
1778 	}
1779 	vp->v_vflag &= ~VV_FORCEINSMQ;
1780 	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1781 	if (error != 0)
1782 		return (error);
1783 	if (*vpp != NULL) {
1784 		/*
1785 		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
1786 		 * operate on an empty inode, which must not be found by
1787 		 * other threads until it is fully filled.  The vnode for an
1788 		 * empty inode must not be re-inserted on the hash by another
1789 		 * thread after we removed it at the beginning.
1790 		 */
1791 		MPASS((ffs_flags & FFSV_REPLACE) == 0);
1792 		return (0);
1793 	}
1794 	if (I_IS_UFS1(ip))
1795 		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1796 	else
1797 		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1798 
1799 	if ((ffs_flags & FFSV_NEWINODE) != 0) {
1800 		/* New inode, just zero out its contents. */
1801 		if (I_IS_UFS1(ip))
1802 			memset(ip->i_din1, 0, sizeof(struct ufs1_dinode));
1803 		else
1804 			memset(ip->i_din2, 0, sizeof(struct ufs2_dinode));
1805 	} else {
1806 		/* Read the disk contents for the inode, copy into the inode. */
1807 		dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
1808 		error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
1809 		    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
1810 		if (error != 0) {
1811 			/*
1812 			 * The inode does not contain anything useful, so it
1813 			 * would be misleading to leave it on its hash chain.
1814 			 * With mode still zero, it will be unlinked and
1815 			 * returned to the free list by vput().
1816 			 */
1817 			vgone(vp);
1818 			vput(vp);
1819 			*vpp = NULL;
1820 			return (error);
1821 		}
1822 		if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
1823 			bqrelse(bp);
1824 			vgone(vp);
1825 			vput(vp);
1826 			*vpp = NULL;
1827 			return (error);
1828 		}
1829 		bqrelse(bp);
1830 	}
1831 	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
1832 	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
1833 		softdep_load_inodeblock(ip);
1834 	else
1835 		ip->i_effnlink = ip->i_nlink;
1836 
1837 	/*
1838 	 * Initialize the vnode from the inode, check for aliases.
1839 	 * Note that the underlying vnode may have changed.
1840 	 */
1841 	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
1842 	    &vp);
1843 	if (error) {
1844 		vgone(vp);
1845 		vput(vp);
1846 		*vpp = NULL;
1847 		return (error);
1848 	}
1849 
1850 	/*
1851 	 * Finish inode initialization.
1852 	 */
1853 	if (vp->v_type != VFIFO) {
1854 		/* FFS supports shared locking for all files except fifos. */
1855 		VN_LOCK_ASHARE(vp);
1856 	}
1857 
1858 	/*
1859 	 * Set up a generation number for this inode if it does not
1860 	 * already have one. This should only happen on old filesystems.
1861 	 */
1862 	if (ip->i_gen == 0) {
1863 		while (ip->i_gen == 0)
1864 			ip->i_gen = arc4random();
1865 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
1866 			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
1867 			DIP_SET(ip, i_gen, ip->i_gen);
1868 		}
1869 	}
1870 #ifdef MAC
1871 	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
1872 		/*
1873 		 * If this vnode is already allocated, and we're running
1874 		 * multi-label, attempt to perform a label association
1875 		 * from the extended attributes on the inode.
1876 		 */
1877 		error = mac_vnode_associate_extattr(mp, vp);
1878 		if (error) {
1879 			/* ufs_inactive will release ip->i_devvp ref. */
1880 			vgone(vp);
1881 			vput(vp);
1882 			*vpp = NULL;
1883 			return (error);
1884 		}
1885 	}
1886 #endif
1887 
1888 	vn_set_state(vp, VSTATE_CONSTRUCTED);
1889 	*vpp = vp;
1890 	return (0);
1891 }
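/*
 * Summary of the ffs_flags handled above, as derived from this function
 * (see ffs_extern.h for the authoritative definitions):
 *
 *	FFSV_FORCEINSMQ	    temporarily set VV_FORCEINSMQ so that
 *			    insmntque() accepts the new vnode
 *	FFSV_REPLACE	    the caller replaces an empty inode that must
 *			    not be rediscovered through the hash
 *	FFSV_NEWINODE	    freshly allocated inode; zero the dinode
 *			    instead of reading it from disk
 *	FFSV_FORCEINODEDEP  do soft updates inode block processing even
 *			    on a read-only filesystem
 */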
1892 
1893 /*
1894  * File handle to vnode
1895  *
1896  * Have to be really careful about stale file handles:
1897  * - check that the inode number is valid
1898  * - for UFS2 check that the inode number is initialized
1899  * - call ffs_vget() to get the locked inode
1900  * - check for an unallocated inode (i_mode == 0)
1901  * - check that the given client host has export rights and return
1902  *   those rights via exflagsp and credanonp
1903  */
1904 static int
1905 ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
1906 {
1907 	struct ufid *ufhp;
1908 
1909 	ufhp = (struct ufid *)fhp;
1910 	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
1911 	    vpp, 0));
1912 }
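/*
 * A hypothetical caller-side sketch (not part of this file): an NFS
 * server reconstitutes an opaque file handle as a struct ufid before
 * calling into the VFS, which dispatches to ffs_fhtovp() above:
 *
 *	struct ufid ufh;
 *
 *	ufh.ufid_len = sizeof(ufh);
 *	ufh.ufid_ino = ino;
 *	ufh.ufid_gen = gen;
 *	error = VFS_FHTOVP(mp, (struct fid *)&ufh, LK_EXCLUSIVE, &nvp);
 */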
1913 
1914 /*
1915  * Return a vnode from a mounted filesystem for the inode with the
1916  * specified generation number.  Return ESTALE if an inode with that
1917  * generation number no longer exists on the filesystem.
1918  */
1919 int
1920 ffs_inotovp(struct mount *mp,
1921 	ino_t ino,
1922 	uint64_t gen,
1923 	int lflags,
1924 	struct vnode **vpp,
1925 	int ffs_flags)
1926 {
1927 	struct ufsmount *ump;
1928 	struct vnode *nvp;
1929 	struct inode *ip;
1930 	struct fs *fs;
1931 	struct cg *cgp;
1932 	struct buf *bp;
1933 	uint64_t cg;
1934 
1935 	ump = VFSTOUFS(mp);
1936 	fs = ump->um_fs;
1937 	*vpp = NULL;
1938 
1939 	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
1940 		return (ESTALE);
1941 
1942 	/*
1943 	 * We need to check whether the inode is initialized: UFS2 does lazy
1944 	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
1945 	 */
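	/*
	 * Illustration: ino_to_cg(fs, ino) is ino / fs->fs_ipg, so the
	 * check below rejects any inode number lying beyond the
	 * initialized portion (cg_initediblk) of its cylinder group.
	 */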
1946 	if (fs->fs_magic == FS_UFS2_MAGIC) {
1947 		cg = ino_to_cg(fs, ino);
1948 		if (ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp) != 0)
1949 			return (ESTALE);
1950 		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
1951 			brelse(bp);
1952 			return (ESTALE);
1953 		}
1954 		brelse(bp);
1955 	}
1956 
1957 	if (ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags) != 0)
1958 		return (ESTALE);
1959 
1960 	ip = VTOI(nvp);
1961 	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
1962 		if (ip->i_mode == 0)
1963 			vgone(nvp);
1964 		vput(nvp);
1965 		return (ESTALE);
1966 	}
1967 
1968 	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
1969 	*vpp = nvp;
1970 	return (0);
1971 }
1972 
1973 /*
1974  * Initialize the filesystem.
1975  */
1976 static int
1977 ffs_init(struct vfsconf *vfsp)
1978 {
1979 
1980 	ffs_susp_initialize();
1981 	softdep_initialize();
1982 	return (ufs_init(vfsp));
1983 }
1984 
1985 /*
1986  * Undo the work of ffs_init().
1987  */
1988 static int
1989 ffs_uninit(struct vfsconf *vfsp)
1990 {
1991 	int ret;
1992 
1993 	ret = ufs_uninit(vfsp);
1994 	softdep_uninitialize();
1995 	ffs_susp_uninitialize();
1996 	taskqueue_drain_all(taskqueue_thread);
1997 	return (ret);
1998 }
1999 
2000 /*
2001  * Structure used to pass information from ffs_sbupdate to its
2002  * helper routine ffs_use_bwrite.
2003  */
2004 struct devfd {
2005 	struct ufsmount	*ump;
2006 	struct buf	*sbbp;
2007 	int		 waitfor;
2008 	int		 suspended;
2009 	int		 error;
2010 };
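/*
 * Usage sketch, mirroring ffs_sbupdate() below: ffs_sbput() invokes the
 * supplied write function once for each block of superblock summary
 * information and once for the superblock itself, passing the opaque
 * devfd pointer through unchanged:
 *
 *	struct devfd devfd = { .ump = ump, .sbbp = sbbp,
 *	    .waitfor = MNT_WAIT, .suspended = 0, .error = 0 };
 *	error = ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite);
 */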
2011 
2012 /*
2013  * Write a superblock and associated information back to disk.
2014  */
2015 int
2016 ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
2017 {
2018 	struct fs *fs;
2019 	struct buf *sbbp;
2020 	struct devfd devfd;
2021 
2022 	fs = ump->um_fs;
2023 	if (fs->fs_ronly == 1 &&
2024 	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
2025 	    (MNT_RDONLY | MNT_UPDATE))
2026 		panic("ffs_sbupdate: write read-only filesystem");
2027 	/*
2028 	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
2029 	 */
2030 	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
2031 	    (int)fs->fs_sbsize, 0, 0, 0);
2032 	/*
2033 	 * Initialize info needed for write function.
2034 	 */
2035 	devfd.ump = ump;
2036 	devfd.sbbp = sbbp;
2037 	devfd.waitfor = waitfor;
2038 	devfd.suspended = suspended;
2039 	devfd.error = 0;
2040 	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
2041 }
2042 
2043 /*
2044  * Write function for use by filesystem-layer routines.
2045  */
2046 static int
2047 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2048 {
2049 	struct devfd *devfdp;
2050 	struct ufsmount *ump;
2051 	struct buf *bp;
2052 	struct fs *fs;
2053 	int error;
2054 
2055 	devfdp = devfd;
2056 	ump = devfdp->ump;
2057 	fs = ump->um_fs;
2058 	/*
2059 	 * Writing the superblock summary information.
2060 	 */
2061 	if (loc != fs->fs_sblockloc) {
2062 		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2063 		bcopy(buf, bp->b_data, (uint64_t)size);
2064 		if (devfdp->suspended)
2065 			bp->b_flags |= B_VALIDSUSPWRT;
2066 		if (devfdp->waitfor != MNT_WAIT)
2067 			bawrite(bp);
2068 		else if ((error = bwrite(bp)) != 0)
2069 			devfdp->error = error;
2070 		return (0);
2071 	}
2072 	/*
2073 	 * Writing the superblock itself. We need to do special checks for it.
2074 	 */
2075 	bp = devfdp->sbbp;
2076 	if (ffs_fsfail_cleanup(ump, devfdp->error))
2077 		devfdp->error = 0;
2078 	if (devfdp->error != 0) {
2079 		brelse(bp);
2080 		return (devfdp->error);
2081 	}
2082 	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
2083 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2084 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2085 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
2086 		fs->fs_sblockloc = SBLOCK_UFS1;
2087 	}
2088 	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
2089 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2090 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2091 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
2092 		fs->fs_sblockloc = SBLOCK_UFS2;
2093 	}
2094 	if (MOUNTEDSOFTDEP(ump->um_mountp))
2095 		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
2096 	UFS_LOCK(ump);
2097 	bcopy((caddr_t)fs, bp->b_data, (uint64_t)fs->fs_sbsize);
2098 	UFS_UNLOCK(ump);
2099 	fs = (struct fs *)bp->b_data;
2100 	fs->fs_fmod = 0;
2101 	ffs_oldfscompat_write(fs);
2102 	fs->fs_si = NULL;
2103 	/* Recalculate the superblock hash */
2104 	fs->fs_ckhash = ffs_calc_sbhash(fs);
2105 	if (devfdp->suspended)
2106 		bp->b_flags |= B_VALIDSUSPWRT;
2107 	if (devfdp->waitfor != MNT_WAIT)
2108 		bawrite(bp);
2109 	else if ((error = bwrite(bp)) != 0)
2110 		devfdp->error = error;
2111 	return (devfdp->error);
2112 }
2113 
2114 static int
2115 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
2116 	int attrnamespace, const char *attrname)
2117 {
2118 
2119 #ifdef UFS_EXTATTR
2120 	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
2121 	    attrname));
2122 #else
2123 	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
2124 	    attrname));
2125 #endif
2126 }
2127 
2128 static void
2129 ffs_ifree(struct ufsmount *ump, struct inode *ip)
2130 {
2131 
2132 	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
2133 		uma_zfree(uma_ufs1, ip->i_din1);
2134 	else if (ip->i_din2 != NULL)
2135 		uma_zfree(uma_ufs2, ip->i_din2);
2136 	uma_zfree_smr(uma_inode, ip);
2137 }
2138 
2139 static int dobkgrdwrite = 1;
2140 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
2141     "Do background writes (honoring the BV_BKGRDWRITE flag)?");
2142 
2143 /*
2144  * Complete a background write started from ffs_bufwrite().
2145  */
2146 static void
2147 ffs_backgroundwritedone(struct buf *bp)
2148 {
2149 	struct bufobj *bufobj;
2150 	struct buf *origbp;
2151 
2152 #ifdef SOFTUPDATES
2153 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
2154 		softdep_handle_error(bp);
2155 #endif
2156 
2157 	/*
2158 	 * Find the original buffer that we are writing.
2159 	 */
2160 	bufobj = bp->b_bufobj;
2161 	BO_LOCK(bufobj);
2162 	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2163 		panic("backgroundwritedone: lost buffer");
2164 
2165 	/*
2166 	 * If the write failed, mark the cylinder group buffer origbp
2167 	 * with BV_BKGRDERR so that the failed write is not lost.
2168 	 */
2169 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2170 		origbp->b_vflags |= BV_BKGRDERR;
2171 	BO_UNLOCK(bufobj);
2172 	/*
2173 	 * Process dependencies, then return any unfinished ones.
2174 	 */
2175 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
2176 		buf_complete(bp);
2177 #ifdef SOFTUPDATES
2178 	if (!LIST_EMPTY(&bp->b_dep))
2179 		softdep_move_dependencies(bp, origbp);
2180 #endif
2181 	/*
2182 	 * This buffer is marked B_NOCACHE so that it is tossed when
2183 	 * released by bufdone().  Clear B_IOSTARTED in case of error.
2184 	 */
2185 	bp->b_flags |= B_NOCACHE;
2186 	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
2187 	pbrelvp(bp);
2188 
2189 	/*
2190 	 * Prevent brelse() from trying to keep and re-dirty bp on
2191 	 * error, since that would dereference b_bufobj in
2192 	 * bdirty()/reassignbuf(), and b_bufobj was cleared by
2193 	 * pbrelvp() above.
2194 	 */
2195 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2196 		bp->b_flags |= B_INVAL;
2197 	bufdone(bp);
2198 	BO_LOCK(bufobj);
2199 	/*
2200 	 * Clear the BV_BKGRDINPROG flag in the original buffer
2201 	 * and awaken it if it is waiting for the write to complete.
2202 	 * If BV_BKGRDINPROG is not set in the original buffer, it must
2203 	 * have been released and re-instantiated, which is not legal.
2204 	 */
2205 	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2206 	    ("backgroundwritedone: lost buffer2"));
2207 	origbp->b_vflags &= ~BV_BKGRDINPROG;
2208 	if (origbp->b_vflags & BV_BKGRDWAIT) {
2209 		origbp->b_vflags &= ~BV_BKGRDWAIT;
2210 		wakeup(&origbp->b_xflags);
2211 	}
2212 	BO_UNLOCK(bufobj);
2213 }
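/*
 * Note: the wakeup(&origbp->b_xflags) above pairs with the
 * msleep(&bp->b_xflags, ...) in ffs_bufwrite() below, where a
 * synchronous write waits for an in-progress background write.
 */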
2214 
2215 /*
2216  * Write a buffer, releasing it on completion (done by iodone
2217  * if asynchronous).  Do not bother writing anything if the
2218  * buffer is invalid.
2219  *
2220  * Note that we set B_CACHE here, indicating that the buffer is
2221  * fully valid and thus cacheable.  This is true even of NFS
2222  * now, so we set it generally.  It could be set either here
2223  * or in biodone() since the I/O is synchronous.  We put it
2224  * here.
2225  */
2226 static int
2227 ffs_bufwrite(struct buf *bp)
2228 {
2229 	struct buf *newbp;
2230 	struct cg *cgp;
2231 
2232 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2233 	if (bp->b_flags & B_INVAL) {
2234 		brelse(bp);
2235 		return (0);
2236 	}
2237 
2238 	if (!BUF_ISLOCKED(bp))
2239 		panic("bufwrite: buffer is not busy???");
2240 	/*
2241 	 * If a background write is already in progress, delay
2242 	 * writing this block if it is asynchronous. Otherwise
2243 	 * wait for the background write to complete.
2244 	 */
2245 	BO_LOCK(bp->b_bufobj);
2246 	if (bp->b_vflags & BV_BKGRDINPROG) {
2247 		if (bp->b_flags & B_ASYNC) {
2248 			BO_UNLOCK(bp->b_bufobj);
2249 			bdwrite(bp);
2250 			return (0);
2251 		}
2252 		bp->b_vflags |= BV_BKGRDWAIT;
2253 		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2254 		    "bwrbg", 0);
2255 		if (bp->b_vflags & BV_BKGRDINPROG)
2256 			panic("bufwrite: still writing");
2257 	}
2258 	bp->b_vflags &= ~BV_BKGRDERR;
2259 	BO_UNLOCK(bp->b_bufobj);
2260 
2261 	/*
2262 	 * If this buffer is marked for background writing and we
2263 	 * do not have to wait for it, make a copy and write the
2264 	 * copy so as to leave this buffer ready for further use.
2265 	 *
2266 	 * This optimization eats a lot of memory.  If we have a page
2267 	 * or buffer shortfall, we can't do it.
2268 	 */
2269 	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2270 	    (bp->b_flags & B_ASYNC) &&
2271 	    !vm_page_count_severe() &&
2272 	    !buf_dirty_count_severe()) {
2273 		KASSERT(bp->b_iodone == NULL,
2274 		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2275 
2276 		/* get a new block */
2277 		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2278 		if (newbp == NULL)
2279 			goto normal_write;
2280 
2281 		KASSERT(buf_mapped(bp), ("Unmapped cg"));
2282 		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2283 		BO_LOCK(bp->b_bufobj);
2284 		bp->b_vflags |= BV_BKGRDINPROG;
2285 		BO_UNLOCK(bp->b_bufobj);
2286 		newbp->b_xflags |=
2287 		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
2288 		newbp->b_lblkno = bp->b_lblkno;
2289 		newbp->b_blkno = bp->b_blkno;
2290 		newbp->b_offset = bp->b_offset;
2291 		newbp->b_iodone = ffs_backgroundwritedone;
2292 		newbp->b_flags |= B_ASYNC;
2293 		newbp->b_flags &= ~B_INVAL;
2294 		pbgetvp(bp->b_vp, newbp);
2295 
2296 #ifdef SOFTUPDATES
2297 		/*
2298 		 * Move over the dependencies.  If there are rollbacks,
2299 		 * leave the parent buffer dirtied as it will need to
2300 		 * be written again.
2301 		 */
2302 		if (LIST_EMPTY(&bp->b_dep) ||
2303 		    softdep_move_dependencies(bp, newbp) == 0)
2304 			bundirty(bp);
2305 #else
2306 		bundirty(bp);
2307 #endif
2308 
2309 		/*
2310 		 * Initiate write on the copy, release the original.  The
2311 		 * BKGRDINPROG flag prevents it from going away until
2312 		 * the background write completes. We have to recalculate
2313 		 * its check hash in case the buffer gets freed and then
2314 		 * reconstituted from the buffer cache during a later read.
2315 		 */
2316 		if ((bp->b_xflags & BX_CYLGRP) != 0) {
2317 			cgp = (struct cg *)bp->b_data;
2318 			cgp->cg_ckhash = 0;
2319 			cgp->cg_ckhash =
2320 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2321 		}
2322 		bqrelse(bp);
2323 		bp = newbp;
2324 	} else
2325 		/* Mark the buffer clean */
2326 		bundirty(bp);
2327 
2328 	/* Let the normal bufwrite do the rest for us */
2329 normal_write:
2330 	/*
2331 	 * If we are writing a cylinder group, update its time.
2332 	 */
2333 	if ((bp->b_xflags & BX_CYLGRP) != 0) {
2334 		cgp = (struct cg *)bp->b_data;
2335 		cgp->cg_old_time = cgp->cg_time = time_second;
2336 	}
2337 	return (bufwrite(bp));
2338 }
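/*
 * A simplified sketch of the background-write lifecycle implemented by
 * ffs_bufwrite() and ffs_backgroundwritedone() above (asynchronous,
 * no-rollback case):
 *
 *	ffs_bufwrite(bp):
 *		copy bp's data into a new buffer newbp,
 *		set BV_BKGRDINPROG on bp, move dependencies to newbp,
 *		bundirty(bp), bqrelse(bp), then bufwrite(newbp)
 *	... I/O on newbp completes ...
 *	ffs_backgroundwritedone(newbp):
 *		find bp via gbincore(), flag BV_BKGRDERR on error,
 *		move unfinished dependencies back to bp,
 *		clear BV_BKGRDINPROG and wake any waiter
 */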
2339 
2340 static void
2341 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2342 {
2343 	struct vnode *vp;
2344 	struct buf *tbp;
2345 	int error, nocopy;
2346 
2347 	/*
2348 	 * This is the bufobj strategy for the private VCHR vnodes
2349 	 * used by FFS to access the underlying storage device.
2350 	 * We override the default bufobj strategy and thus bypass
2351 	 * VOP_STRATEGY() for these vnodes.
2352 	 */
2353 	vp = bo2vnode(bo);
2354 	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
2355 	    bp->b_vp->v_rdev == NULL ||
2356 	    bp->b_vp->v_rdev->si_mountpt == NULL ||
2357 	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
2358 	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
2359 	    ("ffs_geom_strategy() with wrong vp"));
2360 	if (bp->b_iocmd == BIO_WRITE) {
2361 		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2362 		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2363 		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2364 			panic("ffs_geom_strategy: bad I/O");
2365 		nocopy = bp->b_flags & B_NOCOPY;
2366 		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2367 		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2368 		    vp->v_rdev->si_snapdata != NULL) {
2369 			if ((bp->b_flags & B_CLUSTER) != 0) {
2370 				runningbufwakeup(bp);
2371 				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2372 					      b_cluster.cluster_entry) {
2373 					error = ffs_copyonwrite(vp, tbp);
2374 					if (error != 0 &&
2375 					    error != EOPNOTSUPP) {
2376 						bp->b_error = error;
2377 						bp->b_ioflags |= BIO_ERROR;
2378 						bp->b_flags &= ~B_BARRIER;
2379 						bufdone(bp);
2380 						return;
2381 					}
2382 				}
2383 				(void)runningbufclaim(bp, bp->b_bufsize);
2384 			} else {
2385 				error = ffs_copyonwrite(vp, bp);
2386 				if (error != 0 && error != EOPNOTSUPP) {
2387 					bp->b_error = error;
2388 					bp->b_ioflags |= BIO_ERROR;
2389 					bp->b_flags &= ~B_BARRIER;
2390 					bufdone(bp);
2391 					return;
2392 				}
2393 			}
2394 		}
2395 #ifdef SOFTUPDATES
2396 		if ((bp->b_flags & B_CLUSTER) != 0) {
2397 			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2398 				      b_cluster.cluster_entry) {
2399 				if (!LIST_EMPTY(&tbp->b_dep))
2400 					buf_start(tbp);
2401 			}
2402 		} else {
2403 			if (!LIST_EMPTY(&bp->b_dep))
2404 				buf_start(bp);
2405 		}
2406 
2407 #endif
2408 		/*
2409 		 * Check for metadata that needs check-hashes and update them.
2410 		 */
2411 		switch (bp->b_xflags & BX_FSPRIV) {
2412 		case BX_CYLGRP:
2413 			((struct cg *)bp->b_data)->cg_ckhash = 0;
2414 			((struct cg *)bp->b_data)->cg_ckhash =
2415 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2416 			break;
2417 
2418 		case BX_SUPERBLOCK:
2419 		case BX_INODE:
2420 		case BX_INDIR:
2421 		case BX_DIR:
2422 			printf("Check-hash write is unimplemented!!!\n");
2423 			break;
2424 
2425 		case 0:
2426 			break;
2427 
2428 		default:
2429 			printf("multiple buffer types 0x%b\n",
2430 			    (bp->b_xflags & BX_FSPRIV), PRINT_UFS_BUF_XFLAGS);
2431 			break;
2432 		}
2433 	}
2434 	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
2435 		bp->b_xflags |= BX_CVTENXIO;
2436 	g_vfs_strategy(bo, bp);
2437 }
2438 
2439 int
2440 ffs_own_mount(const struct mount *mp)
2441 {
2442 
2443 	if (mp->mnt_op == &ufs_vfsops)
2444 		return (1);
2445 	return (0);
2446 }
2447 
2448 #ifdef	DDB
2449 #ifdef SOFTUPDATES
2450 
2451 /* defined in ffs_softdep.c */
2452 extern void db_print_ffs(struct ufsmount *ump);
2453 
2454 DB_SHOW_COMMAND(ffs, db_show_ffs)
2455 {
2456 	struct mount *mp;
2457 	struct ufsmount *ump;
2458 
2459 	if (have_addr) {
2460 		ump = VFSTOUFS((struct mount *)addr);
2461 		db_print_ffs(ump);
2462 		return;
2463 	}
2464 
2465 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2466 		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2467 			db_print_ffs(VFSTOUFS(mp));
2468 	}
2469 }
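/*
 * Example usage from the kernel debugger (assuming DDB and SOFTUPDATES
 * are compiled in):
 *
 *	db> show ffs			print all mounted FFS filesystems
 *	db> show ffs <mount addr>	print the given filesystem only
 */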
2470 
2471 #endif	/* SOFTUPDATES */
2472 #endif	/* DDB */
2473