/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

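/*
 * UMA zones backing the FFS inode and dinode allocations.  The inode
 * zone is registered with VFS SMR so that lockless (fast path) lookup
 * can inspect inodes without holding the vnode interlock.
 */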
static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

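/*
 * Buffer operations for the device vnode backing an FFS mount; these
 * are installed on the devvp buffer object in ffs_mountfs().
 */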
static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "vp".  If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
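	/*
	 * UFS2 dinodes carry a check-hash.  Reject the inode if the hash
	 * fails to verify, unless the filesystem is already undergoing
	 * ENXIO cleanup after a media failure.
	 */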
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block. Also check
	 * that it does not point to an inode block or a superblock. Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, (intmax_t)inum,
		    (intmax_t)daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}

/*
 * On first ENXIO error, initiate an asynchronous forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		if (ump->um_mountp == rootvnode->v_mount)
			panic("UFS: root fs would be forcibly unmounted");

		/*
		 * Queue an async forced unmount.
		 */
		vfs_ref(ump->um_mountp);
		dounmount(ump->um_mountp,
		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
		printf("UFS: forcibly unmounting %s from %s\n",
		    ump->um_mountp->mnt_stat.f_mntfromname,
		    ump->um_mountp->mnt_stat.f_mntonname);
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one. They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error, flags;
	int error1 __diagused;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

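	/*
	 * Disable the fast path lookup while the mount options are in
	 * flux; it is conditionally re-enabled near the end of this
	 * function once the final flags are known.
	 */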
	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);

	/*
	 * If this is a snapshot request, take the snapshot.
	 */
	if (mp->mnt_flag & MNT_SNAPSHOT) {
		if ((mp->mnt_flag & MNT_UPDATE) == 0)
			return (EINVAL);
		return (ffs_snapshot(mp, fspec));
	}

	/*
	 * Must not call namei() while owning busy ref.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		vfs_unbusy(mp);

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE_PNBUF(&ndp);
	if (!vn_isdisk_error(ndp.ni_vp, &error)) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * New mount
	 *
	 * We need the name for the mount point (also used for
	 * "last mounted on") copied in. If an error occurs,
	 * the mount point is discarded by the upper level code.
	 * Note that vfs_mount_alloc() populates f_mntonname for us.
	 */
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
			vrele(ndp.ni_vp);
			return (error);
		}
	} else {
		/*
		 * When updating, check whether changing from read-only to
		 * read/write; if there is no device name, that's all we do.
		 */
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;

		/*
		 * If it is not the same vnode, or at least the same device,
		 * then the update is not valid.
		 */
		if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(ndp.ni_vp);
		if (error)
			return (error);
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s: update error: blocks "
				    "%jd files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				    (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				    (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n",
					    mp->mnt_stat.f_mntonname);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    mp->mnt_stat.f_mntonname,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs,
			    td->td_ucred)) != 0) {
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be",
		    mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	uint64_t size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EINTEGRITY);
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (uint64_t)fs->fs_sbsize);
	brelse(bp);
	ump->um_bsize = fs->fs_bsize;
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", mp->mnt_stat.f_mntonname,
		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(uint8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (uint64_t)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(uint8_t);
	fs->fs_contigdirs = (uint8_t *)space;
	bzero(fs->fs_contigdirs, size);
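	/*
	 * Step 5: if requested, allow secondary writers to resume by
	 * clearing the suspension flags.
	 */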
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 6: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 7: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	int candelete, canspeedup;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
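	/*
	 * Claim the device for this mount; if some other mount already
	 * owns the device, fail with EBUSY.
	 */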
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	VOP_UNLOCK(devvp);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/*
	 * Fetch the superblock and summary information.  For the root
	 * filesystem, or when a forced mount is requested, search for
	 * alternate superblocks if the standard one cannot be validated.
	 */
	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
		error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
	else
		error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
		    ffs_use_bread);
	if (error != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    mp->mnt_stat.f_mntonname);
		} else {
			vfs_mount_error(mp, "R/W mount on %s denied. "
			    "Filesystem is not clean - run fsck.%s",
			    mp->mnt_stat.f_mntonname,
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    mp->mnt_stat.f_mntonname,
			    (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", mp->mnt_stat.f_mntonname,
		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((uint64_t)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			if ((mp->mnt_flag & MNT_RDONLY) == 0)
				printf("WARNING: %s: GJOURNAL flag on fs "
				    "but no gjournal provider below\n",
				    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
		ump->um_check_blkno = ffs_check_blkno;
	else
		ump->um_check_blkno = NULL;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	sx_init(&ump->um_checkpath_lock, "uchpth");
	ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	ump->um_bsize = fs->fs_bsize;
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
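		/*
		 * TRIM requests are issued asynchronously from a
		 * dedicated taskqueue; blocks being freed are tracked in
		 * the trim hash until their delete requests complete
		 * (see the TRIM handling in ffs_alloc.c).
		 */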
		if ((ump->um_flags & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_odevvp = odevvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * This will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 *	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs->fs_si, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump != NULL) {
		mtx_destroy(UFS_MTX(ump));
		sx_destroy(&ump->um_checkpath_lock);
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		MPASS(ump->um_softdep == NULL);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	mntfs_freevp(devvp);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(struct fs *fs,
	struct ufsmount *ump,
	ufs2_daddr_t sblockloc)
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(struct mount *mp, int mntflags)
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	susp = fs->fs_ronly == 0;
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	MPASS(ump->um_softdep == NULL);
	if (fs->fs_ronly == 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		MPASS(ump->um_trim_inflight == 0);
		taskqueue_free(ump->um_trim_tq);
		free(ump->um_trimhash, M_TRIM);
	}
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	g_topology_lock();
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	BO_LOCK(&ump->um_odevvp->v_bufobj);
	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	mntfs_freevp(ump->um_devvp);
	vrele(ump->um_odevvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	sx_destroy(&ump->um_checkpath_lock);
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	/* devvp is not locked here */
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * If this is a forcible unmount and there were any files that
	 * were unlinked but still open, then vflush() will have
	 * truncated and freed those files, which might have started
	 * some trim work.  Wait here for any trims to complete
	 * and process the blkfrees which follow the trims.
	 * This may create more dirty devvp buffers and softdep deps.
	 */
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
	}

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
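	/*
	 * Blocks and inodes that are pending release (e.g. held by
	 * unfinished soft updates work items) are reported as free,
	 * since they become available once the work completes.
	 */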
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}

static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

static int
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
{
	struct inode *ip;

	/*
	 * Flags are safe to access because ->v_data invalidation
	 * is held off by listmtx.
	 */
	if (vp->v_type == VNON)
		return (false);
	ip = VTOI(vp);
	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
		return (false);
	return (true);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(struct mount *mp)
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	int allerror, error;

	allerror = 0;
	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
		qsync(mp);
#endif
		goto sbupdate;
	}
	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Also test all the other timestamp flags, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT |
		    LK_INTERLOCK)) != 0)
			continue;
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
sbupdate:
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(struct mount *mp, int waitfor)
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK;
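	/*
	 * The secondary write and softdep dependency counts sampled
	 * below are compared against fresh values by
	 * softdep_check_suspend() to decide whether the filesystem has
	 * quiesced enough to be suspended.
	 */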
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
#ifdef QUOTA
		qsyncvp(vp);
#endif
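		/*
		 * Retry the sync if ffs_syncvnode() asks for it by
		 * returning ERELOOKUP.
		 */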
1756 		for (;;) {
1757 			error = ffs_syncvnode(vp, waitfor, 0);
1758 			if (error == ERELOOKUP)
1759 				continue;
1760 			if (error != 0)
1761 				allerror = error;
1762 			break;
1763 		}
1764 		vput(vp);
1765 	}
1766 	/*
1767 	 * Force stale filesystem control information to be flushed.
1768 	 */
1769 	if (waitfor == MNT_WAIT || rebooting) {
1770 		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1771 			allerror = error;
1772 		if (ffs_fsfail_cleanup(ump, allerror))
1773 			allerror = 0;
1774 		/* Flushed work items may create new vnodes to clean */
1775 		if (allerror == 0 && count)
1776 			goto loop;
1777 	}
1778 
1779 	devvp = ump->um_devvp;
1780 	bo = &devvp->v_bufobj;
1781 	BO_LOCK(bo);
1782 	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1783 		BO_UNLOCK(bo);
1784 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1785 		error = VOP_FSYNC(devvp, waitfor, td);
1786 		VOP_UNLOCK(devvp);
1787 		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
1788 			error = ffs_sbupdate(ump, waitfor, 0);
1789 		if (error != 0)
1790 			allerror = error;
1791 		if (ffs_fsfail_cleanup(ump, allerror))
1792 			allerror = 0;
1793 		if (allerror == 0 && waitfor == MNT_WAIT)
1794 			goto loop;
1795 	} else if (suspend != 0) {
1796 		if (softdep_check_suspend(mp,
1797 					  devvp,
1798 					  softdep_deps,
1799 					  softdep_accdeps,
1800 					  secondary_writes,
1801 					  secondary_accwrites) != 0) {
1802 			MNT_IUNLOCK(mp);
1803 			goto loop;	/* More work needed */
1804 		}
1805 		mtx_assert(MNT_MTX(mp), MA_OWNED);
1806 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1807 		MNT_IUNLOCK(mp);
1808 		suspended = 1;
1809 	} else
1810 		BO_UNLOCK(bo);
1811 	/*
1812 	 * Write back modified superblock.
1813 	 */
1814 	if (fs->fs_fmod != 0 &&
1815 	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1816 		allerror = error;
1817 	if (ffs_fsfail_cleanup(ump, allerror))
1818 		allerror = 0;
1819 	return (allerror);
1820 }
1821 
1822 int
1823 ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
1824 {
1825 	return (ffs_vgetf(mp, ino, flags, vpp, 0));
1826 }
1827 
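/*
 * Look up or create a vnode for the given inode number.  The ffs_flags
 * argument (FFSV_*) adjusts the behavior, e.g. FFSV_REPLACE forces
 * replacement of an existing vnode and FFSV_NEWINODE skips reading the
 * inode from disk.
 */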
1828 int
1829 ffs_vgetf(struct mount *mp,
1830 	ino_t ino,
1831 	int flags,
1832 	struct vnode **vpp,
1833 	int ffs_flags)
1834 {
1835 	struct fs *fs;
1836 	struct inode *ip;
1837 	struct ufsmount *ump;
1838 	struct buf *bp;
1839 	struct vnode *vp;
1840 	daddr_t dbn;
1841 	int error;
1842 
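	/*
	 * Replacing an existing vnode requires an exclusive lock request,
	 * since the old vnode may have to be vgone()'d below.
	 */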
1843 	MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1844 	    (flags & LK_EXCLUSIVE) != 0);
1845 
1846 	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1847 	if (error != 0)
1848 		return (error);
1849 	if (*vpp != NULL) {
1850 		if ((ffs_flags & FFSV_REPLACE) == 0 &&
1851 		    ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1852 		    !VN_IS_DOOMED(*vpp)))
1853 			return (0);
1854 		vgone(*vpp);
1855 		vput(*vpp);
1856 	}
1857 
1858 	/*
1859 	 * We must promote to an exclusive lock for vnode creation.  This
1860 	 * can happen if lookup is passed LOCKSHARED.
1861 	 */
1862 	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1863 		flags &= ~LK_TYPE_MASK;
1864 		flags |= LK_EXCLUSIVE;
1865 	}
1866 
1867 	/*
1868 	 * We do not lock vnode creation, as it is believed to be too
1869 	 * expensive for such a rare case as simultaneous creation of a
1870 	 * vnode for the same ino by different processes. We just let
1871 	 * them race and check later to decide who wins. Let the race begin!
1872 	 */
1873 
1874 	ump = VFSTOUFS(mp);
1875 	fs = ump->um_fs;
1876 	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);
1877 
1878 	/* Allocate a new vnode/inode. */
1879 	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1880 	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1881 	if (error) {
1882 		*vpp = NULL;
1883 		uma_zfree_smr(uma_inode, ip);
1884 		return (error);
1885 	}
1886 	/*
1887 	 * FFS supports recursive locking.
1888 	 */
1889 	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1890 	VN_LOCK_AREC(vp);
1891 	vp->v_data = ip;
1892 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
1893 	ip->i_vnode = vp;
1894 	ip->i_ump = ump;
1895 	ip->i_number = ino;
1896 	ip->i_ea_refs = 0;
1897 	ip->i_nextclustercg = -1;
1898 	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1899 	ip->i_mode = 0; /* ensure error cases below throw away vnode */
1900 	cluster_init_vn(&ip->i_clusterw);
1901 #ifdef DIAGNOSTIC
1902 	ufs_init_trackers(ip);
1903 #endif
1904 #ifdef QUOTA
1905 	{
1906 		int i;
1907 		for (i = 0; i < MAXQUOTAS; i++)
1908 			ip->i_dquot[i] = NODQUOT;
1909 	}
1910 #endif
1911 
1912 	if (ffs_flags & FFSV_FORCEINSMQ)
1913 		vp->v_vflag |= VV_FORCEINSMQ;
1914 	error = insmntque(vp, mp);
1915 	if (error != 0) {
1916 		uma_zfree_smr(uma_inode, ip);
1917 		*vpp = NULL;
1918 		return (error);
1919 	}
1920 	vp->v_vflag &= ~VV_FORCEINSMQ;
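	/*
	 * Insert the vnode into the hash.  If another thread beat us to
	 * it, vfs_hash_insert() discards our new vnode and returns the
	 * winner in *vpp.
	 */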
1921 	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1922 	if (error != 0)
1923 		return (error);
1924 	if (*vpp != NULL) {
1925 		/*
1926 		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
1927 		 * operate on an empty inode, which must not be found
1928 		 * by other threads until fully filled in.  The vnode
1929 		 * for the empty inode must not be re-inserted into the
1930 		 * hash by another thread after we removed it above.
1931 		 */
1932 		MPASS((ffs_flags & FFSV_REPLACE) == 0);
1933 		return (0);
1934 	}
1935 	if (I_IS_UFS1(ip))
1936 		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1937 	else
1938 		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1939 
1940 	if ((ffs_flags & FFSV_NEWINODE) != 0) {
1941 		/* New inode, just zero out its contents. */
1942 		if (I_IS_UFS1(ip))
1943 			memset(ip->i_din1, 0, sizeof(struct ufs1_dinode));
1944 		else
1945 			memset(ip->i_din2, 0, sizeof(struct ufs2_dinode));
1946 	} else {
1947 		/* Read the on-disk inode and copy it into the in-core inode. */
1948 		dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
1949 		error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
1950 		    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
1951 		if (error != 0) {
1952 			/*
1953 			 * The inode does not contain anything useful, so it
1954 			 * would be misleading to leave it on its hash chain.
1955 			 * With mode still zero, it will be unlinked and
1956 			 * returned to the free list by vput().
1957 			 */
1958 			vgone(vp);
1959 			vput(vp);
1960 			*vpp = NULL;
1961 			return (error);
1962 		}
1963 		if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
1964 			bqrelse(bp);
1965 			vgone(vp);
1966 			vput(vp);
1967 			*vpp = NULL;
1968 			return (error);
1969 		}
1970 		bqrelse(bp);
1971 	}
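	/*
	 * With soft updates, let the dependency code compute i_effnlink;
	 * otherwise the effective link count is simply the on-disk one.
	 */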
1972 	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
1973 	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
1974 		softdep_load_inodeblock(ip);
1975 	else
1976 		ip->i_effnlink = ip->i_nlink;
1977 
1978 	/*
1979 	 * Initialize the vnode from the inode, check for aliases.
1980 	 * Note that the underlying vnode may have changed.
1981 	 */
1982 	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
1983 	    &vp);
1984 	if (error) {
1985 		vgone(vp);
1986 		vput(vp);
1987 		*vpp = NULL;
1988 		return (error);
1989 	}
1990 
1991 	/*
1992 	 * Finish inode initialization.
1993 	 */
1994 	if (vp->v_type != VFIFO) {
1995 		/* FFS supports shared locking for all files except fifos. */
1996 		VN_LOCK_ASHARE(vp);
1997 	}
1998 
1999 	/*
2000 	 * Set up a generation number for this inode if it does not
2001 	 * already have one. This should only happen on old filesystems.
2002 	 */
2003 	if (ip->i_gen == 0) {
2004 		while (ip->i_gen == 0)
2005 			ip->i_gen = arc4random();
2006 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
2007 			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
2008 			DIP_SET(ip, i_gen, ip->i_gen);
2009 		}
2010 	}
2011 #ifdef MAC
2012 	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
2013 		/*
2014 		 * If this vnode is already allocated, and we're running
2015 		 * multi-label, attempt to perform a label association
2016 		 * from the extended attributes on the inode.
2017 		 */
2018 		error = mac_vnode_associate_extattr(mp, vp);
2019 		if (error) {
2020 			/* ufs_inactive will release ip->i_devvp ref. */
2021 			vgone(vp);
2022 			vput(vp);
2023 			*vpp = NULL;
2024 			return (error);
2025 		}
2026 	}
2027 #endif
2028 
2029 	vn_set_state(vp, VSTATE_CONSTRUCTED);
2030 	*vpp = vp;
2031 	return (0);
2032 }
2033 
2034 /*
2035  * File handle to vnode
2036  *
2037  * Have to be really careful about stale file handles:
2038  * - check that the inode number is valid
2039  * - for UFS2 check that the inode number is initialized
2040  * - call ffs_vget() to get the locked inode
2041  * - check for an unallocated inode (i_mode == 0)
2042  * - check that the given client host has export rights and return
2043  *   those rights via exflagsp and credanonp
2044  */
2045 static int
2046 ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
2047 {
2048 	struct ufid *ufhp;
2049 
2050 	ufhp = (struct ufid *)fhp;
2051 	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
2052 	    vpp, 0));
2053 }
2054 
2055 /*
2056  * Return a vnode from a mounted filesystem for inode with specified
2057  * generation number. Return ESTALE if the inode with given generation
2058  * number no longer exists on that filesystem.
2059  */
2060 int
2061 ffs_inotovp(struct mount *mp,
2062 	ino_t ino,
2063 	uint64_t gen,
2064 	int lflags,
2065 	struct vnode **vpp,
2066 	int ffs_flags)
2067 {
2068 	struct ufsmount *ump;
2069 	struct vnode *nvp;
2070 	struct inode *ip;
2071 	struct fs *fs;
2072 	struct cg *cgp;
2073 	struct buf *bp;
2074 	uint64_t cg;
2075 
2076 	ump = VFSTOUFS(mp);
2077 	fs = ump->um_fs;
2078 	*vpp = NULL;
2079 
2080 	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
2081 		return (ESTALE);
2082 
2083 	/*
2084 	 * Need to check whether the inode is initialized, because UFS2 does
2085 	 * lazy initialization and nfs_fhtovp can offer arbitrary inode numbers.
2086 	 */
2087 	if (fs->fs_magic == FS_UFS2_MAGIC) {
2088 		cg = ino_to_cg(fs, ino);
2089 		if (ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp) != 0)
2090 			return (ESTALE);
2091 		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
2092 			brelse(bp);
2093 			return (ESTALE);
2094 		}
2095 		brelse(bp);
2096 	}
2097 
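	/*
	 * The inode number is plausible; get its vnode and verify that the
	 * inode is live and carries the expected generation number.
	 */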
2098 	if (ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags) != 0)
2099 		return (ESTALE);
2100 
2101 	ip = VTOI(nvp);
2102 	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
2103 		if (ip->i_mode == 0)
2104 			vgone(nvp);
2105 		vput(nvp);
2106 		return (ESTALE);
2107 	}
2108 
2109 	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
2110 	*vpp = nvp;
2111 	return (0);
2112 }
2113 
2114 /*
2115  * Initialize the filesystem.
2116  */
2117 static int
2118 ffs_init(struct vfsconf *vfsp)
2119 {
2120 
2121 	ffs_susp_initialize();
2122 	softdep_initialize();
2123 	return (ufs_init(vfsp));
2124 }
2125 
2126 /*
2127  * Undo the work of ffs_init().
2128  */
2129 static int
2130 ffs_uninit(struct vfsconf *vfsp)
2131 {
2132 	int ret;
2133 
2134 	ret = ufs_uninit(vfsp);
2135 	softdep_uninitialize();
2136 	ffs_susp_uninitialize();
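	/*
	 * Drain the thread taskqueue so that no tasks queued by FFS (e.g.
	 * deferred fsfail cleanup) remain pending after unload.
	 */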
2137 	taskqueue_drain_all(taskqueue_thread);
2138 	return (ret);
2139 }
2140 
2141 /*
2142  * Structure used to pass information from ffs_sbupdate to its
2143  * helper routine ffs_use_bwrite.
2144  */
2145 struct devfd {
2146 	struct ufsmount	*ump;
2147 	struct buf	*sbbp;
2148 	int		 waitfor;
2149 	int		 suspended;
2150 	int		 error;
2151 };
2152 
2153 /*
2154  * Write a superblock and associated information back to disk.
2155  */
2156 int
2157 ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
2158 {
2159 	struct fs *fs;
2160 	struct buf *sbbp;
2161 	struct devfd devfd;
2162 
2163 	fs = ump->um_fs;
2164 	if (fs->fs_ronly == 1 &&
2165 	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
2166 	    (MNT_RDONLY | MNT_UPDATE))
2167 		panic("ffs_sbupdate: write read-only filesystem");
2168 	/*
2169 	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
2170 	 */
2171 	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
2172 	    (int)fs->fs_sbsize, 0, 0, 0);
2173 	/*
2174 	 * Initialize info needed for write function.
2175 	 */
2176 	devfd.ump = ump;
2177 	devfd.sbbp = sbbp;
2178 	devfd.waitfor = waitfor;
2179 	devfd.suspended = suspended;
2180 	devfd.error = 0;
2181 	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
2182 }
2183 
2184 /*
2185  * Write function for use by filesystem-layer routines.
2186  */
2187 static int
2188 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2189 {
2190 	struct devfd *devfdp;
2191 	struct ufsmount *ump;
2192 	struct buf *bp;
2193 	struct fs *fs;
2194 	int error;
2195 
2196 	devfdp = devfd;
2197 	ump = devfdp->ump;
2198 	fs = ump->um_fs;
2199 	/*
2200 	 * Writing the superblock summary information.
2201 	 */
2202 	if (loc != fs->fs_sblockloc) {
2203 		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2204 		bcopy(buf, bp->b_data, (uint64_t)size);
2205 		if (devfdp->suspended)
2206 			bp->b_flags |= B_VALIDSUSPWRT;
2207 		if (devfdp->waitfor != MNT_WAIT)
2208 			bawrite(bp);
2209 		else if ((error = bwrite(bp)) != 0)
2210 			devfdp->error = error;
2211 		return (0);
2212 	}
2213 	/*
2214 	 * Writing the superblock itself. We need to do special checks for it.
2215 	 */
2216 	bp = devfdp->sbbp;
2217 	if (ffs_fsfail_cleanup(ump, devfdp->error))
2218 		devfdp->error = 0;
2219 	if (devfdp->error != 0) {
2220 		brelse(bp);
2221 		return (devfdp->error);
2222 	}
2223 	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
2224 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2225 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2226 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
2227 		fs->fs_sblockloc = SBLOCK_UFS1;
2228 	}
2229 	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
2230 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2231 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2232 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
2233 		fs->fs_sblockloc = SBLOCK_UFS2;
2234 	}
2235 	if (MOUNTEDSOFTDEP(ump->um_mountp))
2236 		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
2237 	UFS_LOCK(ump);
2238 	bcopy((caddr_t)fs, bp->b_data, (uint64_t)fs->fs_sbsize);
2239 	UFS_UNLOCK(ump);
2240 	fs = (struct fs *)bp->b_data;
2241 	fs->fs_fmod = 0;
2242 	ffs_oldfscompat_write(fs, ump);
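	/* Do not let the in-core summary info pointer leak to disk. */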
2243 	fs->fs_si = NULL;
2244 	/* Recalculate the superblock hash */
2245 	fs->fs_ckhash = ffs_calc_sbhash(fs);
2246 	if (devfdp->suspended)
2247 		bp->b_flags |= B_VALIDSUSPWRT;
2248 	if (devfdp->waitfor != MNT_WAIT)
2249 		bawrite(bp);
2250 	else if ((error = bwrite(bp)) != 0)
2251 		devfdp->error = error;
2252 	return (devfdp->error);
2253 }
2254 
2255 static int
2256 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
2257 	int attrnamespace, const char *attrname)
2258 {
2259 
2260 #ifdef UFS_EXTATTR
2261 	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
2262 	    attrname));
2263 #else
2264 	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
2265 	    attrname));
2266 #endif
2267 }
2268 
2269 static void
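/*
 * Return an inode and its dinode to their UMA zones.
 */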
2270 ffs_ifree(struct ufsmount *ump, struct inode *ip)
2271 {
2272 
2273 	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
2274 		uma_zfree(uma_ufs1, ip->i_din1);
2275 	else if (ip->i_din2 != NULL)
2276 		uma_zfree(uma_ufs2, ip->i_din2);
2277 	uma_zfree_smr(uma_inode, ip);
2278 }
2279 
2280 static int dobkgrdwrite = 1;
2281 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
2282     "Do background writes (honoring the BV_BKGRDWRITE flag)?");
2283 
2284 /*
2285  * Complete a background write started from bwrite.
2286  */
2287 static void
2288 ffs_backgroundwritedone(struct buf *bp)
2289 {
2290 	struct bufobj *bufobj;
2291 	struct buf *origbp;
2292 
2293 #ifdef SOFTUPDATES
2294 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
2295 		softdep_handle_error(bp);
2296 #endif
2297 
2298 	/*
2299 	 * Find the original buffer that we are writing.
2300 	 */
2301 	bufobj = bp->b_bufobj;
2302 	BO_LOCK(bufobj);
2303 	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2304 		panic("backgroundwritedone: lost buffer");
2305 
2306 	/*
2307 	 * On error, mark the cylinder group buffer origbp with
2308 	 * BV_BKGRDERR so as not to lose the failed write.
2309 	 */
2310 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2311 		origbp->b_vflags |= BV_BKGRDERR;
2312 	BO_UNLOCK(bufobj);
2313 	/*
2314 	 * Process dependencies, then return any unfinished ones.
2315 	 */
2316 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
2317 		buf_complete(bp);
2318 #ifdef SOFTUPDATES
2319 	if (!LIST_EMPTY(&bp->b_dep))
2320 		softdep_move_dependencies(bp, origbp);
2321 #endif
2322 	/*
2323 	 * This buffer is marked B_NOCACHE so when it is released
2324 	 * by biodone it will be tossed.  Clear B_IOSTARTED in case of error.
2325 	 */
2326 	bp->b_flags |= B_NOCACHE;
2327 	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
2328 	pbrelvp(bp);
2329 
2330 	/*
2331 	 * Prevent brelse() from trying to keep and re-dirty bp on
2332 	 * errors.  Doing so would dereference b_bufobj in
2333 	 * bdirty()/reassignbuf(), but b_bufobj was cleared by
2334 	 * pbrelvp() above.
2335 	 */
2336 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2337 		bp->b_flags |= B_INVAL;
2338 	bufdone(bp);
2339 	BO_LOCK(bufobj);
2340 	/*
2341 	 * Clear the BV_BKGRDINPROG flag in the original buffer
2342 	 * and awaken it if it is waiting for the write to complete.
2343 	 * If BV_BKGRDINPROG is not set in the original buffer it must
2344 	 * have been released and re-instantiated - which is not legal.
2345 	 */
2346 	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2347 	    ("backgroundwritedone: lost buffer2"));
2348 	origbp->b_vflags &= ~BV_BKGRDINPROG;
2349 	if (origbp->b_vflags & BV_BKGRDWAIT) {
2350 		origbp->b_vflags &= ~BV_BKGRDWAIT;
2351 		wakeup(&origbp->b_xflags);
2352 	}
2353 	BO_UNLOCK(bufobj);
2354 }
2355 
2356 /*
2357  * Write and release the buffer on completion.  (Done by iodone
2358  * if async.)  Do not bother writing anything if the buffer
2359  * is invalid.
2360  *
2361  * Note that we set B_CACHE here, indicating that the buffer is
2362  * fully valid and thus cacheable.  This is true even of NFS
2363  * now, so we set it generally.  This could be set either here
2364  * or in biodone() since the I/O is synchronous.  We put it
2365  * here.
2366  */
2367 static int
2368 ffs_bufwrite(struct buf *bp)
2369 {
2370 	struct buf *newbp;
2371 	struct cg *cgp;
2372 
2373 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2374 	if (bp->b_flags & B_INVAL) {
2375 		brelse(bp);
2376 		return (0);
2377 	}
2378 
2379 	if (!BUF_ISLOCKED(bp))
2380 		panic("bufwrite: buffer is not busy???");
2381 	/*
2382 	 * If a background write is already in progress, delay
2383 	 * writing this block if it is asynchronous. Otherwise
2384 	 * wait for the background write to complete.
2385 	 */
2386 	BO_LOCK(bp->b_bufobj);
2387 	if (bp->b_vflags & BV_BKGRDINPROG) {
2388 		if (bp->b_flags & B_ASYNC) {
2389 			BO_UNLOCK(bp->b_bufobj);
2390 			bdwrite(bp);
2391 			return (0);
2392 		}
2393 		bp->b_vflags |= BV_BKGRDWAIT;
2394 		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2395 		    "bwrbg", 0);
2396 		if (bp->b_vflags & BV_BKGRDINPROG)
2397 			panic("bufwrite: still writing");
2398 	}
2399 	bp->b_vflags &= ~BV_BKGRDERR;
2400 	BO_UNLOCK(bp->b_bufobj);
2401 
2402 	/*
2403 	 * If this buffer is marked for background writing and we
2404 	 * do not have to wait for it, make a copy and write the
2405 	 * copy so as to leave this buffer ready for further use.
2406 	 *
2407 	 * This optimization eats a lot of memory.  If we have a page
2408 	 * or buffer shortfall we can't do it.
2409 	 */
2410 	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2411 	    (bp->b_flags & B_ASYNC) &&
2412 	    !vm_page_count_severe() &&
2413 	    !buf_dirty_count_severe()) {
2414 		KASSERT(bp->b_iodone == NULL,
2415 		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2416 
2417 		/* get a new block */
2418 		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2419 		if (newbp == NULL)
2420 			goto normal_write;
2421 
2422 		KASSERT(buf_mapped(bp), ("Unmapped cg"));
2423 		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2424 		BO_LOCK(bp->b_bufobj);
2425 		bp->b_vflags |= BV_BKGRDINPROG;
2426 		BO_UNLOCK(bp->b_bufobj);
2427 		newbp->b_xflags |=
2428 		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
2429 		newbp->b_lblkno = bp->b_lblkno;
2430 		newbp->b_blkno = bp->b_blkno;
2431 		newbp->b_offset = bp->b_offset;
2432 		newbp->b_iodone = ffs_backgroundwritedone;
2433 		newbp->b_flags |= B_ASYNC;
2434 		newbp->b_flags &= ~B_INVAL;
2435 		pbgetvp(bp->b_vp, newbp);
2436 
2437 #ifdef SOFTUPDATES
2438 		/*
2439 		 * Move over the dependencies.  If there are rollbacks,
2440 		 * leave the parent buffer dirtied as it will need to
2441 		 * be written again.
2442 		 */
2443 		if (LIST_EMPTY(&bp->b_dep) ||
2444 		    softdep_move_dependencies(bp, newbp) == 0)
2445 			bundirty(bp);
2446 #else
2447 		bundirty(bp);
2448 #endif
2449 
2450 		/*
2451 		 * Initiate write on the copy, release the original.  The
2452 		 * BKGRDINPROG flag prevents it from going away until
2453 		 * the background write completes. We have to recalculate
2454 		 * its check hash in case the buffer gets freed and then
2455 		 * reconstituted from the buffer cache during a later read.
2456 		 */
2457 		if ((bp->b_xflags & BX_CYLGRP) != 0) {
2458 			cgp = (struct cg *)bp->b_data;
2459 			cgp->cg_ckhash = 0;
2460 			cgp->cg_ckhash =
2461 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2462 		}
2463 		bqrelse(bp);
2464 		bp = newbp;
2465 	} else
2466 		/* Mark the buffer clean */
2467 		bundirty(bp);
2468 
2469 	/* Let the normal bufwrite do the rest for us */
2470 normal_write:
2471 	/*
2472 	 * If we are writing a cylinder group, update its time.
2473 	 */
2474 	if ((bp->b_xflags & BX_CYLGRP) != 0) {
2475 		cgp = (struct cg *)bp->b_data;
2476 		cgp->cg_old_time = cgp->cg_time = time_second;
2477 	}
2478 	return (bufwrite(bp));
2479 }
2480 
2481 static void
2482 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2483 {
2484 	struct vnode *vp;
2485 	struct buf *tbp;
2486 	int error, nocopy;
2487 
2488 	/*
2489 	 * This is the bufobj strategy for the private VCHR vnodes
2490 	 * used by FFS to access the underlying storage device.
2491 	 * We override the default bufobj strategy and thus bypass
2492 	 * VOP_STRATEGY() for these vnodes.
2493 	 */
2494 	vp = bo2vnode(bo);
2495 	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
2496 	    bp->b_vp->v_rdev == NULL ||
2497 	    bp->b_vp->v_rdev->si_mountpt == NULL ||
2498 	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
2499 	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
2500 	    ("ffs_geom_strategy() with wrong vp"));
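	/*
	 * Writes need extra care: snapshot copy-on-write, soft updates
	 * bookkeeping, and metadata check-hash updates all happen here.
	 */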
2501 	if (bp->b_iocmd == BIO_WRITE) {
2502 		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2503 		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2504 		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2505 			panic("ffs_geom_strategy: bad I/O");
2506 		nocopy = bp->b_flags & B_NOCOPY;
2507 		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2508 		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2509 		    vp->v_rdev->si_snapdata != NULL) {
2510 			if ((bp->b_flags & B_CLUSTER) != 0) {
2511 				runningbufwakeup(bp);
2512 				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2513 					      b_cluster.cluster_entry) {
2514 					error = ffs_copyonwrite(vp, tbp);
2515 					if (error != 0 &&
2516 					    error != EOPNOTSUPP) {
2517 						bp->b_error = error;
2518 						bp->b_ioflags |= BIO_ERROR;
2519 						bp->b_flags &= ~B_BARRIER;
2520 						bufdone(bp);
2521 						return;
2522 					}
2523 				}
2524 				bp->b_runningbufspace = bp->b_bufsize;
2525 				atomic_add_long(&runningbufspace,
2526 					       bp->b_runningbufspace);
2527 			} else {
2528 				error = ffs_copyonwrite(vp, bp);
2529 				if (error != 0 && error != EOPNOTSUPP) {
2530 					bp->b_error = error;
2531 					bp->b_ioflags |= BIO_ERROR;
2532 					bp->b_flags &= ~B_BARRIER;
2533 					bufdone(bp);
2534 					return;
2535 				}
2536 			}
2537 		}
2538 #ifdef SOFTUPDATES
2539 		if ((bp->b_flags & B_CLUSTER) != 0) {
2540 			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2541 				      b_cluster.cluster_entry) {
2542 				if (!LIST_EMPTY(&tbp->b_dep))
2543 					buf_start(tbp);
2544 			}
2545 		} else {
2546 			if (!LIST_EMPTY(&bp->b_dep))
2547 				buf_start(bp);
2548 		}
2549 
2550 #endif
2551 		/*
2552 		 * Check for metadata that needs check-hashes and update them.
2553 		 */
2554 		switch (bp->b_xflags & BX_FSPRIV) {
2555 		case BX_CYLGRP:
2556 			((struct cg *)bp->b_data)->cg_ckhash = 0;
2557 			((struct cg *)bp->b_data)->cg_ckhash =
2558 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2559 			break;
2560 
2561 		case BX_SUPERBLOCK:
2562 		case BX_INODE:
2563 		case BX_INDIR:
2564 		case BX_DIR:
2565 			printf("Check-hash write is unimplemented!!!\n");
2566 			break;
2567 
2568 		case 0:
2569 			break;
2570 
2571 		default:
2572 			printf("multiple buffer types 0x%b\n",
2573 			    (bp->b_xflags & BX_FSPRIV), PRINT_UFS_BUF_XFLAGS);
2574 			break;
2575 		}
2576 	}
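	/*
	 * When the ffs_enxio_enable knob is set, ask that errors on
	 * non-read buffers be converted to ENXIO, the error recognized
	 * by the fsfail cleanup code.
	 */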
2577 	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
2578 		bp->b_xflags |= BX_CVTENXIO;
2579 	g_vfs_strategy(bo, bp);
2580 }
2581 
2582 int
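/*
 * Report whether the given mount point belongs to FFS/UFS.
 */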
2583 ffs_own_mount(const struct mount *mp)
2584 {
2585 
2586 	if (mp->mnt_op == &ufs_vfsops)
2587 		return (1);
2588 	return (0);
2589 }
2590 
2591 #ifdef	DDB
2592 #ifdef SOFTUPDATES
2593 
2594 /* defined in ffs_softdep.c */
2595 extern void db_print_ffs(struct ufsmount *ump);
2596 
2597 DB_SHOW_COMMAND(ffs, db_show_ffs)
2598 {
2599 	struct mount *mp;
2600 	struct ufsmount *ump;
2601 
2602 	if (have_addr) {
2603 		ump = VFSTOUFS((struct mount *)addr);
2604 		db_print_ffs(ump);
2605 		return;
2606 	}
2607 
2608 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2609 		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2610 			db_print_ffs(VFSTOUFS(mp));
2611 	}
2612 }
2613 
2614 #endif	/* SOFTUPDATES */
2615 #endif	/* DDB */
2616