/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that the userquota and groupquota options are not currently used
 * by the UFS/FFS code; mount(8) normally does not pass them from
 * userland, but loader(8) can supply them via
 * vfs.root.mountfrom.options.
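 *
 * For example, an illustrative loader.conf(5) fragment (device name
 * hypothetical) would be:
 *
 *	vfs.root.mountfrom="ufs:/dev/ada0p2"
 *	vfs.root.mountfrom.options="rw,userquota"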
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };
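
/*
 * These names arrive via nmount(2); for illustration, a mount(8)
 * invocation exercising a few of them (device and mount point are
 * hypothetical) might be:
 *
 *	mount -t ufs -o noatime,noclusterr /dev/ada0p2 /mnt
 */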

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");
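
/*
 * Being CTLFLAG_RWTUN, the knob above can be changed at runtime or
 * preset as a loader tunable, e.g.:
 *
 *	sysctl vfs.ffs.enxio_enable=0
 */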

/*
 * Return a buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-NULL, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);
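	/*
	 * For reference, lblkno() and blkoff() (see fs.h) are simple
	 * shift/mask operations: lblkno(fs, offset) is
	 * offset >> fs_bshift and blkoff(fs, offset) is
	 * offset & fs_qbmask.
	 */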

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block. Also check
	 * that it does not point to an inode block or a superblock. Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
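	 *
	 * Schematically, within cylinder group "cg" (a simplified sketch
	 * of the layout that the test below relies on):
	 *
	 *	cgbase(fs, cg)			start of the cylinder group
	 *	... cgsblock(fs, cg)		bootstrap/backup superblock area
	 *	... cgdmin(fs, cg)		cg header and inode blocks
	 *	... cgbase(fs, cg) + fs_fpg	data blocks to end of group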
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, (intmax_t)inum,
		    (intmax_t)daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}

/*
 * Initiate a forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 */
static void
ffs_fsfail_unmount(void *v, int pending)
{
	struct fsfail_task *etp;
	struct mount *mp;

	etp = v;

	/*
	 * Find our mount and get a ref on it, then try to unmount.
	 */
	mp = vfs_getvfs(&etp->fsid);
	if (mp != NULL)
		dounmount(mp, MNT_FORCE, curthread);
	free(etp, M_UFSMNT);
}

/*
 * On first ENXIO error, start a task that forcibly unmounts the filesystem.
 *
 * Return true if a cleanup is in progress.
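 *
 * A typical caller pattern, used throughout this file, is:
 *
 *	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
 *		return (error);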
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	struct fsfail_task *etp;
	struct task *tp;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		/*
		 * Queue an async forced unmount.
		 */
		etp = ump->um_fsfail_task;
		ump->um_fsfail_task = NULL;
		if (etp != NULL) {
			tp = &etp->task;
			TASK_INIT(tp, 0, ffs_fsfail_unmount, etp);
			taskqueue_enqueue(taskqueue_thread, tp);
			printf("UFS: forcibly unmounting %s from %s\n",
			    ump->um_mountp->mnt_stat.f_mntfromname,
			    ump->um_mountp->mnt_stat.f_mntonname);
		}
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one. They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	pid_t fsckpid = 0;
	int error, error1, flags;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
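		/*
		 * First FFS mount: create the UMA zones shared by all
		 * FFS mounts for incore inodes and UFS1/UFS2 dinodes.
		 */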
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
	    vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
		/*
		 * Once we have set the restricted PID, do not
		 * persist "fsckpid" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "fsckpid");
		vfs_deleteopt(mp->mnt_opt, "fsckpid");
		if (mp->mnt_flag & MNT_UPDATE) {
			if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
			     vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
				vfs_mount_error(mp,
				    "Checker enable: Must be read-only");
				return (EINVAL);
			}
		} else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
			vfs_mount_error(mp,
			    "Checker enable: Must be read-only");
			return (EINVAL);
		}
		/* A fsckpid of 0 means the checker is done; record it as -1. */
		if (fsckpid == 0)
			fsckpid = -1;
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;
		if (fsckpid == -1 && ump->um_fsckpid > 0) {
			if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
			    (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
				return (error);
			g_topology_lock();
			/*
			 * Return to normal read-only mode.
			 */
			error = g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			ump->um_fsckpid = 0;
		}
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (MOUNTEDSOFTDEP(mp)) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s: Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (MOUNTEDSOFTDEP(mp))
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			fs->fs_ronly = 1;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, td, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If we are running a checker, do not allow upgrade.
			 */
			if (ump->um_fsckpid > 0) {
				vfs_mount_error(mp,
				    "Active checker, cannot upgrade to write");
				return (EINVAL);
			}
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					   "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					   "R/W mount of %s denied. %s.%s",
					   fs->fs_fsmnt,
					   "Filesystem is not clean - run fsck",
					   (fs->fs_flags & FS_SUJ) == 0 ? "" :
					   " Forced mount will invalidate"
					   " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
		/*
		 * If this is a request from fsck to clean up the filesystem,
		 * then allow the specified pid to proceed.
		 */
		if (fsckpid > 0) {
			if (ump->um_fsckpid != 0) {
				vfs_mount_error(mp,
				    "Active checker already running on %s",
				    fs->fs_fsmnt);
				return (EINVAL);
			}
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				vfs_mount_error(mp,
				    "Checker activation failed on %s",
				    fs->fs_fsmnt);
				return (error);
			}
			ump->um_fsckpid = fsckpid;
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			fs->fs_mtime = time_second;
			fs->fs_fmod = 1;
			fs->fs_clean = 0;
			(void) ffs_sbupdate(ump, MNT_WAIT, 0);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));

		/*
		 * Must not call namei() while owning busy ref.
		 */
		vfs_unbusy(mp);
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it is not the same vnode, or at least the same
		 * underlying device, then the update is not valid.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount_alloc() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
		if (fsckpid > 0) {
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			if (error) {
				printf("WARNING: %s: Checker activation "
				    "failed\n", fs->fs_fsmnt);
			} else {
				ump->um_fsckpid = fsckpid;
				if (fs->fs_snapinum[0] != 0)
					ffs_snapshot_mount(mp);
				fs->fs_mtime = time_second;
				fs->fs_clean = 0;
				(void) ffs_sbupdate(ump, MNT_WAIT, 0);
			}
		}
	}
	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, struct thread *td, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(odevvp, mp, td)
	struct vnode *odevvp;
	struct mount *mp;
	struct thread *td;
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	struct fsfail_task *etp;
	int candelete, canspeedup;
	off_t loc;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	VOP_UNLOCK(odevvp);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	loc = STDSB;
	if ((mp->mnt_flag & MNT_ROOTFS) != 0)
		loc = STDSB_NOHASHFAIL;
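	/*
	 * For the root filesystem, STDSB_NOHASHFAIL tolerates a superblock
	 * check-hash failure, since the root must be mounted before fsck(8)
	 * can be run to repair it.
	 */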
	if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0)
		goto out;
	/* none of these types of check-hashes are maintained by this kernel */
	fs->fs_metackhash &= ~(CK_INDIR | CK_DIR);
	/* no support for any undefined flags */
	fs->fs_flags &= FS_SUPPORTED;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			printf("WARNING: %s: GJOURNAL flag on fs "
			    "but no gjournal provider below\n",
			    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
		ump->um_check_blkno = ffs_check_blkno;
	else
		ump->um_check_blkno = NULL;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
		if (((ump->um_flags) & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_odevvp = odevvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * This will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 *	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* UFS_EXTATTR_AUTOSTART */
#endif /* UFS_EXTATTR */
	etp = malloc(sizeof *ump->um_fsfail_task, M_UFSMNT, M_WAITOK | M_ZERO);
	etp->fsid = mp->mnt_stat.f_fsid;
	ump->um_fsfail_task = etp;
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs->fs_si, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump) {
		mtx_destroy(UFS_MTX(ump));
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	mntfs_freevp(devvp);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
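	/* The caller owns the malloc()ed copy, so discard the buffer. */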
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(fs, ump, sblockloc)
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
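		/*
		 * Cap fs_maxfilesize so the largest file ends at logical
		 * block 0x7fffffff, keeping block numbers within the
		 * signed 32-bit range that UFS1 uses on disk.
		 */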
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(fs, ump)
	struct fs *fs;
	struct ufsmount *ump;
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(mp, mntflags)
	struct mount *mp;
	int mntflags;
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	susp = fs->fs_ronly == 0;
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
		taskqueue_free(ump->um_trim_tq);
		free(ump->um_trimhash, M_TRIM);
	}
	g_topology_lock();
	if (ump->um_fsckpid > 0) {
		/*
		 * Return to normal read-only mode.
		 */
		error = g_access(ump->um_cp, 0, -1, 0);
		ump->um_fsckpid = 0;
	}
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	BO_LOCK(&ump->um_odevvp->v_bufobj);
	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	mntfs_freevp(ump->um_devvp);
	vrele(ump->um_odevvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	if (ump->um_fsfail_task != NULL)
		free(ump->um_fsfail_task, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
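	/*
	 * Free space is accounted in fragments: whole free blocks are
	 * scaled by fs_frag, then free fragments and any blocks pending
	 * release are added in.
	 */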
1670 	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
1671 	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1672 	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
1673 	    dbtofsb(fs, fs->fs_pendingblocks);
1674 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1675 	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1676 	UFS_UNLOCK(ump);
1677 	sbp->f_namemax = UFS_MAXNAMLEN;
1678 	return (0);
1679 }
1680 
1681 static bool
1682 sync_doupdate(struct inode *ip)
1683 {
1684 
1685 	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
1686 	    IN_UPDATE)) != 0);
1687 }
1688 
1689 static int
1690 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
1691 {
1692 	struct inode *ip;
1693 
1694 	/*
1695 	 * Flags are safe to access because ->v_data invalidation
1696 	 * is held off by listmtx.
1697 	 */
1698 	if (vp->v_type == VNON)
1699 		return (false);
1700 	ip = VTOI(vp);
1701 	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
1702 		return (false);
1703 	return (true);
1704 }
1705 
1706 /*
1707  * For a lazy sync, we only care about access times, quotas and the
1708  * superblock.  Other filesystem changes are already converted to
1709  * cylinder group blocks or inode blocks updates and are written to
1710  * disk by syncer.
1711  */
1712 static int
1713 ffs_sync_lazy(mp)
1714      struct mount *mp;
1715 {
1716 	struct vnode *mvp, *vp;
1717 	struct inode *ip;
1718 	struct thread *td;
1719 	int allerror, error;
1720 
1721 	allerror = 0;
1722 	td = curthread;
1723 	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
1724 #ifdef QUOTA
1725 		qsync(mp);
1726 #endif
1727 		goto sbupdate;
1728 	}
1729 	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
1730 		if (vp->v_type == VNON) {
1731 			VI_UNLOCK(vp);
1732 			continue;
1733 		}
1734 		ip = VTOI(vp);
1735 
1736 		/*
1737 		 * The IN_ACCESS flag is converted to IN_MODIFIED by
1738 		 * ufs_close() and ufs_getattr() by the calls to
1739 		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
1740 		 * Test also all the other timestamp flags too, to pick up
1741 		 * any other cases that could be missed.
1742 		 */
1743 		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
1744 			VI_UNLOCK(vp);
1745 			continue;
1746 		}
1747 		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
1748 		    td)) != 0)
1749 			continue;
1750 #ifdef QUOTA
1751 		qsyncvp(vp);
1752 #endif
1753 		if (sync_doupdate(ip))
1754 			error = ffs_update(vp, 0);
1755 		if (error != 0)
1756 			allerror = error;
1757 		vput(vp);
1758 	}
1759 sbupdate:
1760 	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
1761 	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
1762 		allerror = error;
1763 	return (allerror);
1764 }
1765 
1766 /*
1767  * Go through the disk queues to initiate sandbagged IO;
1768  * go through the inodes to write those that have been modified;
1769  * initiate the writing of the super block if it has been modified.
1770  *
1771  * Note: we are always called with the filesystem marked busy using
1772  * vfs_busy().
1773  */
1774 static int
1775 ffs_sync(mp, waitfor)
1776 	struct mount *mp;
1777 	int waitfor;
1778 {
1779 	struct vnode *mvp, *vp, *devvp;
1780 	struct thread *td;
1781 	struct inode *ip;
1782 	struct ufsmount *ump = VFSTOUFS(mp);
1783 	struct fs *fs;
1784 	int error, count, lockreq, allerror = 0;
1785 	int suspend;
1786 	int suspended;
1787 	int secondary_writes;
1788 	int secondary_accwrites;
1789 	int softdep_deps;
1790 	int softdep_accdeps;
1791 	struct bufobj *bo;
1792 
1793 	suspend = 0;
1794 	suspended = 0;
1795 	td = curthread;
1796 	fs = ump->um_fs;
1797 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
1798 		panic("%s: ffs_sync: modification on read-only filesystem",
1799 		    fs->fs_fsmnt);
1800 	if (waitfor == MNT_LAZY) {
1801 		if (!rebooting)
1802 			return (ffs_sync_lazy(mp));
1803 		waitfor = MNT_NOWAIT;
1804 	}
1805 
1806 	/*
1807 	 * Write back each (modified) inode.
1808 	 */
1809 	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
1810 	if (waitfor == MNT_SUSPEND) {
1811 		suspend = 1;
1812 		waitfor = MNT_WAIT;
1813 	}
1814 	if (waitfor == MNT_WAIT)
1815 		lockreq = LK_EXCLUSIVE;
1816 	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
1817 loop:
1818 	/* Grab snapshot of secondary write counts */
1819 	MNT_ILOCK(mp);
1820 	secondary_writes = mp->mnt_secondary_writes;
1821 	secondary_accwrites = mp->mnt_secondary_accwrites;
1822 	MNT_IUNLOCK(mp);
1823 
1824 	/* Grab snapshot of softdep dependency counts */
1825 	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
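	/*
	 * These two before-images are compared against the live counters
	 * by softdep_check_suspend() below; any difference means writes
	 * or dependencies were created while we were scanning, so the
	 * suspend cannot complete yet and another pass is made.
	 */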
1826 
1827 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1828 		/*
1829 		 * Depend on the vnode interlock to keep things stable enough
1830 		 * for a quick test.  Since there might be hundreds of
1831 		 * thousands of vnodes, we cannot afford even a subroutine
1832 		 * call unless there's a good chance that we have work to do.
1833 		 */
1834 		if (vp->v_type == VNON) {
1835 			VI_UNLOCK(vp);
1836 			continue;
1837 		}
1838 		ip = VTOI(vp);
1839 		if ((ip->i_flag &
1840 		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1841 		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1842 			VI_UNLOCK(vp);
1843 			continue;
1844 		}
1845 		if ((error = vget(vp, lockreq, td)) != 0) {
1846 			if (error == ENOENT || error == ENOLCK) {
1847 				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1848 				goto loop;
1849 			}
1850 			continue;
1851 		}
1852 #ifdef QUOTA
1853 		qsyncvp(vp);
1854 #endif
1855 		if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
1856 			allerror = error;
1857 		vput(vp);
1858 	}
1859 	/*
1860 	 * Force stale filesystem control information to be flushed.
1861 	 */
1862 	if (waitfor == MNT_WAIT || rebooting) {
1863 		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1864 			allerror = error;
1865 		if (ffs_fsfail_cleanup(ump, allerror))
1866 			allerror = 0;
1867 		/* Flushed work items may create new vnodes to clean */
1868 		if (allerror == 0 && count)
1869 			goto loop;
1870 	}
1871 
1872 	devvp = ump->um_devvp;
1873 	bo = &devvp->v_bufobj;
1874 	BO_LOCK(bo);
1875 	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1876 		BO_UNLOCK(bo);
1877 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1878 		error = VOP_FSYNC(devvp, waitfor, td);
1879 		VOP_UNLOCK(devvp);
1880 		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
1881 			error = ffs_sbupdate(ump, waitfor, 0);
1882 		if (error != 0)
1883 			allerror = error;
1884 		if (ffs_fsfail_cleanup(ump, allerror))
1885 			allerror = 0;
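		/*
		 * A waited-for sync must converge: flushing the device
		 * vnode above may itself have produced more work, so go
		 * around again until a pass finds nothing dirty.
		 */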
1886 		if (allerror == 0 && waitfor == MNT_WAIT)
1887 			goto loop;
1888 	} else if (suspend != 0) {
1889 		if (softdep_check_suspend(mp,
1890 					  devvp,
1891 					  softdep_deps,
1892 					  softdep_accdeps,
1893 					  secondary_writes,
1894 					  secondary_accwrites) != 0) {
1895 			MNT_IUNLOCK(mp);
1896 			goto loop;	/* More work needed */
1897 		}
1898 		mtx_assert(MNT_MTX(mp), MA_OWNED);
1899 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1900 		MNT_IUNLOCK(mp);
1901 		suspended = 1;
1902 	} else
1903 		BO_UNLOCK(bo);
1904 	/*
1905 	 * Write back modified superblock.
1906 	 */
1907 	if (fs->fs_fmod != 0 &&
1908 	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1909 		allerror = error;
1910 	if (ffs_fsfail_cleanup(ump, allerror))
1911 		allerror = 0;
1912 	return (allerror);
1913 }
1914 
1915 int
1916 ffs_vget(mp, ino, flags, vpp)
1917 	struct mount *mp;
1918 	ino_t ino;
1919 	int flags;
1920 	struct vnode **vpp;
1921 {
1922 	return (ffs_vgetf(mp, ino, flags, vpp, 0));
1923 }
1924 
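/*
 * ffs_vget() is the VFS_VGET() entry point; FFS-internal callers that
 * need extra behavior use ffs_vgetf() with FFSV_* flags, e.g. (a
 * hypothetical call) ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
 * FFSV_FORCEINSMQ) to force insertion on the mount's vnode list even
 * while the filesystem is suspended.
 */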
1925 int
1926 ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
1927 	struct mount *mp;
1928 	ino_t ino;
1929 	int flags;
1930 	struct vnode **vpp;
1931 	int ffs_flags;
1932 {
1933 	struct fs *fs;
1934 	struct inode *ip;
1935 	struct ufsmount *ump;
1936 	struct buf *bp;
1937 	struct vnode *vp;
1938 	daddr_t dbn;
1939 	int error;
1940 
1941 	MPASS((ffs_flags & FFSV_REPLACE) == 0 || (flags & LK_EXCLUSIVE) != 0);
1942 
1943 	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1944 	if (error != 0)
1945 		return (error);
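	/*
	 * A vnode already in the hash normally satisfies the request, but
	 * a caller re-allocating the inode number (ffs_valloc(), see the
	 * comment further below) passes FFSV_REPLACE: the hashed vnode is
	 * stale, so discard it and build a fresh one.
	 */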
1946 	if (*vpp != NULL) {
1947 		if ((ffs_flags & FFSV_REPLACE) == 0)
1948 			return (0);
1949 		vgone(*vpp);
1950 		vput(*vpp);
1951 	}
1952 
1953 	/*
1954 	 * We must promote to an exclusive lock for vnode creation.  This
1955 	 * can happen if lookup is passed LOCKSHARED.
1956 	 */
1957 	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1958 		flags &= ~LK_TYPE_MASK;
1959 		flags |= LK_EXCLUSIVE;
1960 	}
1961 
1962 	/*
1963 	 * We do not lock vnode creation as it is believed to be too
1964 	 * expensive for such a rare case as the simultaneous creation of
1965 	 * a vnode for the same ino by different processes.  We just allow
1966 	 * them to race and check later to decide who wins.  Let the race begin!
1967 	 */
1968 
1969 	ump = VFSTOUFS(mp);
1970 	fs = ump->um_fs;
1971 	ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO);
1972 
1973 	/* Allocate a new vnode/inode. */
1974 	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1975 	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1976 	if (error) {
1977 		*vpp = NULL;
1978 		uma_zfree(uma_inode, ip);
1979 		return (error);
1980 	}
1981 	/*
1982 	 * FFS supports recursive locking.
1983 	 */
1984 	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
1985 	VN_LOCK_AREC(vp);
1986 	vp->v_data = ip;
1987 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
1988 	ip->i_vnode = vp;
1989 	ip->i_ump = ump;
1990 	ip->i_number = ino;
1991 	ip->i_ea_refs = 0;
1992 	ip->i_nextclustercg = -1;
1993 	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1994 	ip->i_mode = 0; /* ensure error cases below throw away vnode */
1995 #ifdef QUOTA
1996 	{
1997 		int i;
1998 		for (i = 0; i < MAXQUOTAS; i++)
1999 			ip->i_dquot[i] = NODQUOT;
2000 	}
2001 #endif
2002 
2003 	if (ffs_flags & FFSV_FORCEINSMQ)
2004 		vp->v_vflag |= VV_FORCEINSMQ;
2005 	error = insmntque(vp, mp);
2006 	if (error != 0) {
2007 		uma_zfree(uma_inode, ip);
2008 		*vpp = NULL;
2009 		return (error);
2010 	}
2011 	vp->v_vflag &= ~VV_FORCEINSMQ;
2012 	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
2013 	if (error != 0)
2014 		return (error);
2015 	if (*vpp != NULL) {
2016 		/*
2017 		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
2018 		 * operate on an empty inode, which must not be found by
2019 		 * other threads until it is fully filled in.  The vnode
2020 		 * for an empty inode must not be re-inserted on the hash
2021 		 * by another thread after we removed it at the beginning.
2022 		 */
2023 		MPASS((ffs_flags & FFSV_REPLACE) == 0);
2024 		return (0);
2025 	}
2026 
2027 	/* Read in the disk contents for the inode, copy into the inode. */
2028 	dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
2029 	error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize,
2030 	    NULL, NULL, 0, NOCRED, 0, NULL, &bp);
2031 	if (error != 0) {
2032 		/*
2033 		 * The inode does not contain anything useful, so it would
2034 		 * be misleading to leave it on its hash chain. With mode
2035 		 * still zero, it will be unlinked and returned to the free
2036 		 * list by vput().
2037 		 */
2038 		vgone(vp);
2039 		vput(vp);
2040 		*vpp = NULL;
2041 		return (error);
2042 	}
2043 	if (I_IS_UFS1(ip))
2044 		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
2045 	else
2046 		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
2047 	if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
2048 		bqrelse(bp);
2049 		vgone(vp);
2050 		vput(vp);
2051 		*vpp = NULL;
2052 		return (error);
2053 	}
2054 	if (DOINGSOFTDEP(vp))
2055 		softdep_load_inodeblock(ip);
2056 	else
2057 		ip->i_effnlink = ip->i_nlink;
2058 	bqrelse(bp);
2059 
2060 	/*
2061 	 * Initialize the vnode from the inode, check for aliases.
2062 	 * Note that the underlying vnode may have changed.
2063 	 */
2064 	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
2065 	    &vp);
2066 	if (error) {
2067 		vgone(vp);
2068 		vput(vp);
2069 		*vpp = NULL;
2070 		return (error);
2071 	}
2072 
2073 	/*
2074 	 * Finish inode initialization.
2075 	 */
2076 	if (vp->v_type != VFIFO) {
2077 		/* FFS supports shared locking for all files except fifos. */
2078 		VN_LOCK_ASHARE(vp);
2079 	}
2080 
2081 	/*
2082 	 * Set up a generation number for this inode if it does not
2083 	 * already have one. This should only happen on old filesystems.
2084 	 */
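	/*
	 * The generation number is what lets NFS clients detect stale
	 * file handles after an inode has been freed and re-used, so it
	 * must be non-zero and hard to guess.
	 */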
2085 	if (ip->i_gen == 0) {
2086 		while (ip->i_gen == 0)
2087 			ip->i_gen = arc4random();
2088 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
2089 			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
2090 			DIP_SET(ip, i_gen, ip->i_gen);
2091 		}
2092 	}
2093 #ifdef MAC
2094 	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
2095 		/*
2096 		 * If this vnode is already allocated, and we're running
2097 		 * multi-label, attempt to perform a label association
2098 		 * from the extended attributes on the inode.
2099 		 */
2100 		error = mac_vnode_associate_extattr(mp, vp);
2101 		if (error) {
2102 			/* ufs_inactive will release ip->i_devvp ref. */
2103 			vgone(vp);
2104 			vput(vp);
2105 			*vpp = NULL;
2106 			return (error);
2107 		}
2108 	}
2109 #endif
2110 
2111 	*vpp = vp;
2112 	return (0);
2113 }
2114 
2115 /*
2116  * File handle to vnode
2117  *
2118  * Have to be really careful about stale file handles:
2119  * - check that the inode number is valid
2120  * - for UFS2 check that the inode number is initialized
2121  * - call ffs_vget() to get the locked inode
2122  * - check for an unallocated inode (i_mode == 0)
2123  * - check that the given client host has export rights and return
2124  *   those rights via exflagsp and credanonp
2125  */
2126 static int
2127 ffs_fhtovp(mp, fhp, flags, vpp)
2128 	struct mount *mp;
2129 	struct fid *fhp;
2130 	int flags;
2131 	struct vnode **vpp;
2132 {
2133 	struct ufid *ufhp;
2134 	struct ufsmount *ump;
2135 	struct fs *fs;
2136 	struct cg *cgp;
2137 	struct buf *bp;
2138 	ino_t ino;
2139 	u_int cg;
2140 	int error;
2141 
2142 	ufhp = (struct ufid *)fhp;
2143 	ino = ufhp->ufid_ino;
2144 	ump = VFSTOUFS(mp);
2145 	fs = ump->um_fs;
2146 	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
2147 		return (ESTALE);
2148 	/*
2149 	 * Need to check if inode is initialized because UFS2 does lazy
2150 	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
2151 	 */
2152 	if (fs->fs_magic != FS_UFS2_MAGIC)
2153 		return (ufs_fhtovp(mp, ufhp, flags, vpp));
2154 	cg = ino_to_cg(fs, ino);
2155 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0)
2156 		return (error);
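	/*
	 * cg_initediblk counts the inodes in this cylinder group whose
	 * on-disk blocks have been initialized; an inode number at or
	 * beyond that point cannot name a valid file.
	 */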
2157 	if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
2158 		brelse(bp);
2159 		return (ESTALE);
2160 	}
2161 	brelse(bp);
2162 	return (ufs_fhtovp(mp, ufhp, flags, vpp));
2163 }
2164 
2165 /*
2166  * Initialize the filesystem.
2167  */
2168 static int
2169 ffs_init(vfsp)
2170 	struct vfsconf *vfsp;
2171 {
2172 
2173 	ffs_susp_initialize();
2174 	softdep_initialize();
2175 	return (ufs_init(vfsp));
2176 }
2177 
2178 /*
2179  * Undo the work of ffs_init().
2180  */
2181 static int
2182 ffs_uninit(vfsp)
2183 	struct vfsconf *vfsp;
2184 {
2185 	int ret;
2186 
2187 	ret = ufs_uninit(vfsp);
2188 	softdep_uninitialize();
2189 	ffs_susp_uninitialize();
2190 	taskqueue_drain_all(taskqueue_thread);
2191 	return (ret);
2192 }
2193 
2194 /*
2195  * Structure used to pass information from ffs_sbupdate to its
2196  * helper routine ffs_use_bwrite.
2197  */
2198 struct devfd {
2199 	struct ufsmount	*ump;
2200 	struct buf	*sbbp;
2201 	int		 waitfor;
2202 	int		 suspended;
2203 	int		 error;
2204 };
2205 
2206 /*
2207  * Write a superblock and associated information back to disk.
2208  */
2209 int
2210 ffs_sbupdate(ump, waitfor, suspended)
2211 	struct ufsmount *ump;
2212 	int waitfor;
2213 	int suspended;
2214 {
2215 	struct fs *fs;
2216 	struct buf *sbbp;
2217 	struct devfd devfd;
2218 
2219 	fs = ump->um_fs;
2220 	if (fs->fs_ronly == 1 &&
2221 	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
2222 	    (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
2223 		panic("ffs_sbupdate: write read-only filesystem");
2224 	/*
2225 	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
2226 	 */
2227 	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
2228 	    (int)fs->fs_sbsize, 0, 0, 0);
2229 	/*
2230 	 * Initialize info needed for write function.
2231 	 */
2232 	devfd.ump = ump;
2233 	devfd.sbbp = sbbp;
2234 	devfd.waitfor = waitfor;
2235 	devfd.suspended = suspended;
2236 	devfd.error = 0;
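	/*
	 * ffs_sbput() writes the superblock and, when present, its
	 * summary information, calling back ffs_use_bwrite() for each
	 * piece; any write error is accumulated in devfd.error.
	 */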
2237 	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
2238 }
2239 
2240 /*
2241  * Write function for use by filesystem-layer routines.
2242  */
2243 static int
2244 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2245 {
2246 	struct devfd *devfdp;
2247 	struct ufsmount *ump;
2248 	struct buf *bp;
2249 	struct fs *fs;
2250 	int error;
2251 
2252 	devfdp = devfd;
2253 	ump = devfdp->ump;
2254 	fs = ump->um_fs;
2255 	/*
2256 	 * Writing the superblock summary information.
2257 	 */
2258 	if (loc != fs->fs_sblockloc) {
2259 		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2260 		bcopy(buf, bp->b_data, (u_int)size);
2261 		if (devfdp->suspended)
2262 			bp->b_flags |= B_VALIDSUSPWRT;
2263 		if (devfdp->waitfor != MNT_WAIT)
2264 			bawrite(bp);
2265 		else if ((error = bwrite(bp)) != 0)
2266 			devfdp->error = error;
2267 		return (0);
2268 	}
2269 	/*
2270 	 * Writing the superblock itself. We need to do special checks for it.
2271 	 */
2272 	bp = devfdp->sbbp;
2273 	if (ffs_fsfail_cleanup(ump, devfdp->error))
2274 		devfdp->error = 0;
2275 	if (devfdp->error != 0) {
2276 		brelse(bp);
2277 		return (devfdp->error);
2278 	}
2279 	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
2280 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2281 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2282 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
2283 		fs->fs_sblockloc = SBLOCK_UFS1;
2284 	}
2285 	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
2286 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2287 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2288 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
2289 		fs->fs_sblockloc = SBLOCK_UFS2;
2290 	}
2291 	if (MOUNTEDSOFTDEP(ump->um_mountp))
2292 		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
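	/*
	 * From here on, work on the copy of the superblock in the
	 * buffer, so that in-core-only fields (such as the fs_si
	 * summary pointer) can be cleared without disturbing the
	 * in-core superblock itself.
	 */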
2293 	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
2294 	fs = (struct fs *)bp->b_data;
2295 	ffs_oldfscompat_write(fs, ump);
2296 	fs->fs_si = NULL;
2297 	/* Recalculate the superblock hash */
2298 	fs->fs_ckhash = ffs_calc_sbhash(fs);
2299 	if (devfdp->suspended)
2300 		bp->b_flags |= B_VALIDSUSPWRT;
2301 	if (devfdp->waitfor != MNT_WAIT)
2302 		bawrite(bp);
2303 	else if ((error = bwrite(bp)) != 0)
2304 		devfdp->error = error;
2305 	return (devfdp->error);
2306 }
2307 
2308 static int
2309 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
2310 	int attrnamespace, const char *attrname)
2311 {
2312 
2313 #ifdef UFS_EXTATTR
2314 	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
2315 	    attrname));
2316 #else
2317 	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
2318 	    attrname));
2319 #endif
2320 }
2321 
2322 static void
2323 ffs_ifree(struct ufsmount *ump, struct inode *ip)
2324 {
2325 
2326 	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
2327 		uma_zfree(uma_ufs1, ip->i_din1);
2328 	else if (ip->i_din2 != NULL)
2329 		uma_zfree(uma_ufs2, ip->i_din2);
2330 	uma_zfree(uma_inode, ip);
2331 }
2332 
2333 static int dobkgrdwrite = 1;
2334 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
2335     "Do background writes (honoring the BV_BKGRDWRITE flag)?");
2336 
2337 /*
2338  * Complete a background write started from bwrite.
2339  */
2340 static void
2341 ffs_backgroundwritedone(struct buf *bp)
2342 {
2343 	struct bufobj *bufobj;
2344 	struct buf *origbp;
2345 
2346 #ifdef SOFTUPDATES
2347 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
2348 		softdep_handle_error(bp);
2349 #endif
2350 
2351 	/*
2352 	 * Find the original buffer that we are writing.
2353 	 */
2354 	bufobj = bp->b_bufobj;
2355 	BO_LOCK(bufobj);
2356 	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2357 		panic("backgroundwritedone: lost buffer");
2358 
2359 	/*
2360 	 * If the write failed, mark the original cylinder group buffer
2361 	 * origbp with BV_BKGRDERR so that the failed write is not lost.
2362 	 */
2363 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2364 		origbp->b_vflags |= BV_BKGRDERR;
2365 	BO_UNLOCK(bufobj);
2366 	/*
2367 	 * Process dependencies, then move any unfinished ones back to origbp.
2368 	 */
2369 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
2370 		buf_complete(bp);
2371 #ifdef SOFTUPDATES
2372 	if (!LIST_EMPTY(&bp->b_dep))
2373 		softdep_move_dependencies(bp, origbp);
2374 #endif
2375 	/*
2376 	 * This buffer is marked B_NOCACHE so when it is released
2377 	 * by bufdone() it will be tossed.
2378 	 */
2379 	bp->b_flags |= B_NOCACHE;
2380 	bp->b_flags &= ~B_CACHE;
2381 	pbrelvp(bp);
2382 
2383 	/*
2384 	 * Prevent brelse() from trying to keep and re-dirtying bp on
2385 	 * errors. It causes b_bufobj dereference in
2386 	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
2387 	 * pbrelvp() above.
2388 	 */
2389 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2390 		bp->b_flags |= B_INVAL;
2391 	bufdone(bp);
2392 	BO_LOCK(bufobj);
2393 	/*
2394 	 * Clear the BV_BKGRDINPROG flag in the original buffer
2395 	 * and awaken it if it is waiting for the write to complete.
2396 	 * If BV_BKGRDINPROG is not set in the original buffer it must
2397 	 * have been released and re-instantiated - which is not legal.
2398 	 */
2399 	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2400 	    ("backgroundwritedone: lost buffer2"));
2401 	origbp->b_vflags &= ~BV_BKGRDINPROG;
2402 	if (origbp->b_vflags & BV_BKGRDWAIT) {
2403 		origbp->b_vflags &= ~BV_BKGRDWAIT;
2404 		wakeup(&origbp->b_xflags);
2405 	}
2406 	BO_UNLOCK(bufobj);
2407 }
2408 
2409 
2410 /*
2411  * Write, release buffer on completion.  (Done by iodone
2412  * if async).  Do not bother writing anything if the buffer
2413  * is invalid.
2414  *
2415  * Note that we set B_CACHE here, indicating that buffer is
2416  * fully valid and thus cacheable.  This is true even of NFS
2417  * now so we set it generally.  This could be set either here
2418  * or in biodone() since the I/O is synchronous.  We put it
2419  * here.
2420  */
2421 static int
2422 ffs_bufwrite(struct buf *bp)
2423 {
2424 	struct buf *newbp;
2425 	struct cg *cgp;
2426 
2427 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2428 	if (bp->b_flags & B_INVAL) {
2429 		brelse(bp);
2430 		return (0);
2431 	}
2432 
2433 	if (!BUF_ISLOCKED(bp))
2434 		panic("bufwrite: buffer is not busy???");
2435 	/*
2436 	 * If a background write is already in progress, delay
2437 	 * writing this block if it is asynchronous. Otherwise
2438 	 * wait for the background write to complete.
2439 	 */
2440 	BO_LOCK(bp->b_bufobj);
2441 	if (bp->b_vflags & BV_BKGRDINPROG) {
2442 		if (bp->b_flags & B_ASYNC) {
2443 			BO_UNLOCK(bp->b_bufobj);
2444 			bdwrite(bp);
2445 			return (0);
2446 		}
2447 		bp->b_vflags |= BV_BKGRDWAIT;
2448 		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2449 		    "bwrbg", 0);
2450 		if (bp->b_vflags & BV_BKGRDINPROG)
2451 			panic("bufwrite: still writing");
2452 	}
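	/*
	 * A previous background write may have failed and left
	 * BV_BKGRDERR set; clear it, as the write we are about to do
	 * supersedes the failed one.
	 */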
2453 	bp->b_vflags &= ~BV_BKGRDERR;
2454 	BO_UNLOCK(bp->b_bufobj);
2455 
2456 	/*
2457 	 * If this buffer is marked for background writing and we
2458 	 * do not have to wait for it, make a copy and write the
2459 	 * copy so as to leave this buffer ready for further use.
2460 	 *
2461 	 * This optimization eats a lot of memory.  If we have a page
2462 	 * or buffer shortfall we can't do it.
2463 	 */
2464 	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2465 	    (bp->b_flags & B_ASYNC) &&
2466 	    !vm_page_count_severe() &&
2467 	    !buf_dirty_count_severe()) {
2468 		KASSERT(bp->b_iodone == NULL,
2469 		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2470 
2471 		/* get a new block */
2472 		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2473 		if (newbp == NULL)
2474 			goto normal_write;
2475 
2476 		KASSERT(buf_mapped(bp), ("Unmapped cg"));
2477 		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2478 		BO_LOCK(bp->b_bufobj);
2479 		bp->b_vflags |= BV_BKGRDINPROG;
2480 		BO_UNLOCK(bp->b_bufobj);
2481 		newbp->b_xflags |=
2482 		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
2483 		newbp->b_lblkno = bp->b_lblkno;
2484 		newbp->b_blkno = bp->b_blkno;
2485 		newbp->b_offset = bp->b_offset;
2486 		newbp->b_iodone = ffs_backgroundwritedone;
2487 		newbp->b_flags |= B_ASYNC;
2488 		newbp->b_flags &= ~B_INVAL;
2489 		pbgetvp(bp->b_vp, newbp);
2490 
2491 #ifdef SOFTUPDATES
2492 		/*
2493 		 * Move over the dependencies.  If there are rollbacks,
2494 		 * leave the parent buffer dirtied as it will need to
2495 		 * be written again.
2496 		 */
2497 		if (LIST_EMPTY(&bp->b_dep) ||
2498 		    softdep_move_dependencies(bp, newbp) == 0)
2499 			bundirty(bp);
2500 #else
2501 		bundirty(bp);
2502 #endif
2503 
2504 		/*
2505 		 * Initiate write on the copy, release the original.  The
2506 		 * BKGRDINPROG flag prevents it from going away until
2507 		 * the background write completes. We have to recalculate
2508 		 * its check hash in case the buffer gets freed and then
2509 		 * reconstituted from the buffer cache during a later read.
2510 		 */
2511 		if ((bp->b_xflags & BX_CYLGRP) != 0) {
2512 			cgp = (struct cg *)bp->b_data;
2513 			cgp->cg_ckhash = 0;
2514 			cgp->cg_ckhash =
2515 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2516 		}
2517 		bqrelse(bp);
2518 		bp = newbp;
2519 	} else
2520 		/* Mark the buffer clean */
2521 		bundirty(bp);
2522 
2523 
2524 	/* Let the normal bufwrite do the rest for us */
2525 normal_write:
2526 	/*
2527 	 * If we are writing a cylinder group, update its time.
2528 	 */
2529 	if ((bp->b_xflags & BX_CYLGRP) != 0) {
2530 		cgp = (struct cg *)bp->b_data;
2531 		cgp->cg_old_time = cgp->cg_time = time_second;
2532 	}
2533 	return (bufwrite(bp));
2534 }
2535 
2536 
2537 static void
2538 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2539 {
2540 	struct vnode *vp;
2541 	struct buf *tbp;
2542 	int error, nocopy;
2543 
2544 	/*
2545 	 * This is the bufobj strategy for the private VCHR vnodes
2546 	 * used by FFS to access the underlying storage device.
2547 	 * We override the default bufobj strategy and thus bypass
2548 	 * VOP_STRATEGY() for these vnodes.
2549 	 */
2550 	vp = bo2vnode(bo);
2551 	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
2552 	    bp->b_vp->v_rdev == NULL ||
2553 	    bp->b_vp->v_rdev->si_mountpt == NULL ||
2554 	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
2555 	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
2556 	    ("ffs_geom_strategy() with wrong vp"));
2557 	if (bp->b_iocmd == BIO_WRITE) {
2558 		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2559 		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2560 		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2561 			panic("ffs_geom_strategy: bad I/O");
2562 		nocopy = bp->b_flags & B_NOCOPY;
2563 		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
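		/*
		 * If snapshots exist on this filesystem, blocks about to
		 * be overwritten may first have to be copied into them;
		 * ffs_copyonwrite() handles that.  B_NOCOPY marks writes
		 * issued by the snapshot code itself, which must not be
		 * copied again.
		 */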
2564 		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2565 		    vp->v_rdev->si_snapdata != NULL) {
2566 			if ((bp->b_flags & B_CLUSTER) != 0) {
2567 				runningbufwakeup(bp);
2568 				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2569 					      b_cluster.cluster_entry) {
2570 					error = ffs_copyonwrite(vp, tbp);
2571 					if (error != 0 &&
2572 					    error != EOPNOTSUPP) {
2573 						bp->b_error = error;
2574 						bp->b_ioflags |= BIO_ERROR;
2575 						bufdone(bp);
2576 						return;
2577 					}
2578 				}
2579 				bp->b_runningbufspace = bp->b_bufsize;
2580 				atomic_add_long(&runningbufspace,
2581 					       bp->b_runningbufspace);
2582 			} else {
2583 				error = ffs_copyonwrite(vp, bp);
2584 				if (error != 0 && error != EOPNOTSUPP) {
2585 					bp->b_error = error;
2586 					bp->b_ioflags |= BIO_ERROR;
2587 					bufdone(bp);
2588 					return;
2589 				}
2590 			}
2591 		}
2592 #ifdef SOFTUPDATES
2593 		if ((bp->b_flags & B_CLUSTER) != 0) {
2594 			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2595 				      b_cluster.cluster_entry) {
2596 				if (!LIST_EMPTY(&tbp->b_dep))
2597 					buf_start(tbp);
2598 			}
2599 		} else {
2600 			if (!LIST_EMPTY(&bp->b_dep))
2601 				buf_start(bp);
2602 		}
2603 
2604 #endif
2605 		/*
2606 		 * Check for metadata that needs check-hashes and update them.
2607 		 */
2608 		switch (bp->b_xflags & BX_FSPRIV) {
2609 		case BX_CYLGRP:
2610 			((struct cg *)bp->b_data)->cg_ckhash = 0;
2611 			((struct cg *)bp->b_data)->cg_ckhash =
2612 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2613 			break;
2614 
2615 		case BX_SUPERBLOCK:
2616 		case BX_INODE:
2617 		case BX_INDIR:
2618 		case BX_DIR:
2619 			printf("Check-hash write is unimplemented!!!\n");
2620 			break;
2621 
2622 		case 0:
2623 			break;
2624 
2625 		default:
2626 			printf("multiple buffer types 0x%b\n",
2627 			    (u_int)(bp->b_xflags & BX_FSPRIV),
2628 			    PRINT_UFS_BUF_XFLAGS);
2629 			break;
2630 		}
2631 	}
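	/*
	 * When ffs_enxio_enable is set, have the lower layers convert
	 * errors on non-read I/O to ENXIO, which the fsfail cleanup
	 * code recognizes as a failed device.
	 */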
2632 	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
2633 		bp->b_xflags |= BX_CVTENXIO;
2634 	g_vfs_strategy(bo, bp);
2635 }
2636 
2637 int
2638 ffs_own_mount(const struct mount *mp)
2639 {
2640 
2641 	if (mp->mnt_op == &ufs_vfsops)
2642 		return (1);
2643 	return (0);
2644 }
2645 
2646 #ifdef	DDB
2647 #ifdef SOFTUPDATES
2648 
2649 /* defined in ffs_softdep.c */
2650 extern void db_print_ffs(struct ufsmount *ump);
2651 
2652 DB_SHOW_COMMAND(ffs, db_show_ffs)
2653 {
2654 	struct mount *mp;
2655 	struct ufsmount *ump;
2656 
2657 	if (have_addr) {
2658 		ump = VFSTOUFS((struct mount *)addr);
2659 		db_print_ffs(ump);
2660 		return;
2661 	}
2662 
2663 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2664 		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2665 			db_print_ffs(VFSTOUFS(mp));
2666 	}
2667 }
2668 
2669 #endif	/* SOFTUPDATES */
2670 #endif	/* DDB */
2671