xref: /freebsd/sys/ufs/ffs/ffs_vfsops.c (revision 1111a44301da39d7b7459c784230e1405e8980f8)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1989, 1991, 1993, 1994
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 #include "opt_quota.h"
34 #include "opt_ufs.h"
35 #include "opt_ffs.h"
36 #include "opt_ddb.h"
37 
38 #include <sys/param.h>
39 #include <sys/gsb_crc32.h>
40 #include <sys/systm.h>
41 #include <sys/namei.h>
42 #include <sys/priv.h>
43 #include <sys/proc.h>
44 #include <sys/taskqueue.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/vnode.h>
48 #include <sys/mount.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/fcntl.h>
53 #include <sys/ioccom.h>
54 #include <sys/malloc.h>
55 #include <sys/mutex.h>
56 #include <sys/rwlock.h>
57 #include <sys/sysctl.h>
58 #include <sys/vmmeter.h>
59 
60 #include <security/mac/mac_framework.h>
61 
62 #include <ufs/ufs/dir.h>
63 #include <ufs/ufs/extattr.h>
64 #include <ufs/ufs/gjournal.h>
65 #include <ufs/ufs/quota.h>
66 #include <ufs/ufs/ufsmount.h>
67 #include <ufs/ufs/inode.h>
68 #include <ufs/ufs/ufs_extern.h>
69 
70 #include <ufs/ffs/fs.h>
71 #include <ufs/ffs/ffs_extern.h>
72 
73 #include <vm/vm.h>
74 #include <vm/uma.h>
75 #include <vm/vm_page.h>
76 
77 #include <geom/geom.h>
78 #include <geom/geom_vfs.h>
79 
80 #include <ddb/ddb.h>
81 
82 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
83 VFS_SMR_DECLARE;
84 
85 static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
86 static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
87 static int	ffs_sync_lazy(struct mount *mp);
88 static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
89 static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);
90 
91 static vfs_init_t ffs_init;
92 static vfs_uninit_t ffs_uninit;
93 static vfs_extattrctl_t ffs_extattrctl;
94 static vfs_cmount_t ffs_cmount;
95 static vfs_unmount_t ffs_unmount;
96 static vfs_mount_t ffs_mount;
97 static vfs_statfs_t ffs_statfs;
98 static vfs_fhtovp_t ffs_fhtovp;
99 static vfs_sync_t ffs_sync;
100 
101 static struct vfsops ufs_vfsops = {
102 	.vfs_extattrctl =	ffs_extattrctl,
103 	.vfs_fhtovp =		ffs_fhtovp,
104 	.vfs_init =		ffs_init,
105 	.vfs_mount =		ffs_mount,
106 	.vfs_cmount =		ffs_cmount,
107 	.vfs_quotactl =		ufs_quotactl,
108 	.vfs_root =		vfs_cache_root,
109 	.vfs_cachedroot =	ufs_root,
110 	.vfs_statfs =		ffs_statfs,
111 	.vfs_sync =		ffs_sync,
112 	.vfs_uninit =		ffs_uninit,
113 	.vfs_unmount =		ffs_unmount,
114 	.vfs_vget =		ffs_vget,
115 	.vfs_susp_clean =	process_deferred_inactive,
116 };
117 
118 VFS_SET(ufs_vfsops, ufs, VFCF_FILEREVINC);
119 MODULE_VERSION(ufs, 1);
120 
121 static b_strategy_t ffs_geom_strategy;
122 static b_write_t ffs_bufwrite;
123 
124 static struct buf_ops ffs_ops = {
125 	.bop_name =	"FFS",
126 	.bop_write =	ffs_bufwrite,
127 	.bop_strategy =	ffs_geom_strategy,
128 	.bop_sync =	bufsync,
129 #ifdef NO_FFS_SNAPSHOT
130 	.bop_bdflush =	bufbdflush,
131 #else
132 	.bop_bdflush =	ffs_bdflush,
133 #endif
134 };
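/*
 * Note (editorial): these buffer operations take effect because ffs_mountfs()
 * below points the device vnode's buffer object at them
 * (devvp->v_bufobj.bo_ops = &ffs_ops), so buffer writes and strategy calls
 * for the underlying device are routed through ffs_bufwrite() and
 * ffs_geom_strategy().
 */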
135 
136 /*
137  * Note that userquota and groupquota options are not currently used
138  * by UFS/FFS code and generally mount(8) does not pass those options
139  * from userland, but they can be passed by loader(8) via
140  * vfs.root.mountfrom.options.
141  */
142 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
143     "noclusterw", "noexec", "export", "force", "from", "groupquota",
144     "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
145     "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };
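/*
 * Illustrative example (hypothetical device name): booting with
 *
 *	vfs.root.mountfrom="ufs:/dev/ada0p2"
 *	vfs.root.mountfrom.options="rw,userquota"
 *
 * in loader.conf makes "userquota" show up in mnt_optnew; it is accepted by
 * ffs_opts above and then discarded by the vfs_deleteopt() calls early in
 * ffs_mount().
 */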
146 
147 static int ffs_enxio_enable = 1;
148 SYSCTL_DECL(_vfs_ffs);
149 SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
150     &ffs_enxio_enable, 0,
151     "enable mapping of other disk I/O errors to ENXIO");
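/*
 * The knob above is exposed as the vfs.ffs.enxio_enable sysctl and, being
 * CTLFLAG_RWTUN, can also be preset as a loader tunable; for example,
 * "sysctl vfs.ffs.enxio_enable=0" keeps the original error codes instead of
 * mapping them to ENXIO.
 */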
152 
153 /*
154  * Return buffer with the contents of block "offset" from the beginning of
155  * directory "vp".  If "res" is non-NULL, fill it in with a pointer to the
156  * remaining space in the directory.
157  */
158 static int
159 ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
160 {
161 	struct inode *ip;
162 	struct fs *fs;
163 	struct buf *bp;
164 	ufs_lbn_t lbn;
165 	int bsize, error;
166 
167 	ip = VTOI(vp);
168 	fs = ITOFS(ip);
169 	lbn = lblkno(fs, offset);
170 	bsize = blksize(fs, ip, lbn);
171 
172 	*bpp = NULL;
173 	error = bread(vp, lbn, bsize, NOCRED, &bp);
174 	if (error) {
175 		return (error);
176 	}
177 	if (res)
178 		*res = (char *)bp->b_data + blkoff(fs, offset);
179 	*bpp = bp;
180 	return (0);
181 }
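/*
 * Editorial note: ffs_blkatoff() is not called directly by the UFS layer;
 * ffs_mountfs() installs it as ump->um_blkatoff and directory code reaches
 * it through the UFS_BLKATOFF() wrapper.  A minimal caller sketch
 * (illustrative only):
 *
 *	struct buf *bp;
 *	char *dirbuf;
 *
 *	error = UFS_BLKATOFF(vp, (off_t)offset, &dirbuf, &bp);
 *	if (error == 0) {
 *		... examine directory data at dirbuf ...
 *		brelse(bp);
 *	}
 */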
182 
183 /*
184  * Load up the contents of an inode and copy the appropriate pieces
185  * to the incore copy.
186  */
187 static int
188 ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
189 {
190 	struct ufs1_dinode *dip1;
191 	struct ufs2_dinode *dip2;
192 	int error;
193 
194 	if (I_IS_UFS1(ip)) {
195 		dip1 = ip->i_din1;
196 		*dip1 =
197 		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
198 		ip->i_mode = dip1->di_mode;
199 		ip->i_nlink = dip1->di_nlink;
200 		ip->i_effnlink = dip1->di_nlink;
201 		ip->i_size = dip1->di_size;
202 		ip->i_flags = dip1->di_flags;
203 		ip->i_gen = dip1->di_gen;
204 		ip->i_uid = dip1->di_uid;
205 		ip->i_gid = dip1->di_gid;
206 		if (ffs_oldfscompat_inode_read(fs, ip->i_dp, time_second) &&
207 		    fs->fs_ronly == 0)
208 			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
209 		return (0);
210 	}
211 	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
212 	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
213 	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
214 		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
215 		    (intmax_t)ino);
216 		return (error);
217 	}
218 	*ip->i_din2 = *dip2;
219 	dip2 = ip->i_din2;
220 	ip->i_mode = dip2->di_mode;
221 	ip->i_nlink = dip2->di_nlink;
222 	ip->i_effnlink = dip2->di_nlink;
223 	ip->i_size = dip2->di_size;
224 	ip->i_flags = dip2->di_flags;
225 	ip->i_gen = dip2->di_gen;
226 	ip->i_uid = dip2->di_uid;
227 	ip->i_gid = dip2->di_gid;
228 	if (ffs_oldfscompat_inode_read(fs, ip->i_dp, time_second) &&
229 	    fs->fs_ronly == 0)
230 		UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
231 	return (0);
232 }
233 
234 /*
235  * Verify that a filesystem block number is a valid data block.
236  * This routine is only called on untrusted filesystems.
237  */
238 static int
239 ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
240 {
241 	struct fs *fs;
242 	struct ufsmount *ump;
243 	ufs2_daddr_t end_daddr;
244 	int cg, havemtx;
245 
246 	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
247 	    ("ffs_check_blkno called on a trusted file system"));
248 	ump = VFSTOUFS(mp);
249 	fs = ump->um_fs;
250 	cg = dtog(fs, daddr);
251 	end_daddr = daddr + numfrags(fs, blksize);
252 	/*
253 	 * Verify that the block number is a valid data block. Also check
254 	 * that it does not point to an inode block or a superblock. Accept
255 	 * blocks that are unallocated (0) or part of snapshot metadata
256 	 * (BLK_NOCOPY or BLK_SNAP).
257 	 *
258 	 * Thus, the block must be in a valid range for the filesystem and
259 	 * either in the space before a backup superblock (except the first
260 	 * cylinder group where that space is used by the bootstrap code) or
261 	 * after the inode blocks and before the end of the cylinder group.
262 	 */
263 	if ((uint64_t)daddr <= BLK_SNAP ||
264 	    ((uint64_t)end_daddr <= fs->fs_size &&
265 	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
266 	    (daddr >= cgdmin(fs, cg) &&
267 	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
268 		return (0);
269 	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
270 		UFS_LOCK(ump);
271 	if (ppsratecheck(&ump->um_last_integritymsg,
272 	    &ump->um_secs_integritymsg, 1)) {
273 		UFS_UNLOCK(ump);
274 		uprintf("\n%s: inode %jd, out-of-range indirect block "
275 		    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
276 		if (havemtx)
277 			UFS_LOCK(ump);
278 	} else if (!havemtx)
279 		UFS_UNLOCK(ump);
280 	return (EINTEGRITY);
281 }
282 
283 /*
284  * On first ENXIO error, initiate an asynchronous forcible unmount.
285  * Used to unmount filesystems whose underlying media has gone away.
286  *
287  * Return true if a cleanup is in progress.
288  */
289 int
290 ffs_fsfail_cleanup(struct ufsmount *ump, int error)
291 {
292 	int retval;
293 
294 	UFS_LOCK(ump);
295 	retval = ffs_fsfail_cleanup_locked(ump, error);
296 	UFS_UNLOCK(ump);
297 	return (retval);
298 }
299 
300 int
301 ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
302 {
303 	mtx_assert(UFS_MTX(ump), MA_OWNED);
304 	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
305 		ump->um_flags |= UM_FSFAIL_CLEANUP;
306 		if (ump->um_mountp == rootvnode->v_mount)
307 			panic("UFS: root fs would be forcibly unmounted");
308 
309 		/*
310 		 * Queue an async forced unmount.
311 		 */
312 		vfs_ref(ump->um_mountp);
313 		dounmount(ump->um_mountp,
314 		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
315 		printf("UFS: forcibly unmounting %s from %s\n",
316 		    ump->um_mountp->mnt_stat.f_mntfromname,
317 		    ump->um_mountp->mnt_stat.f_mntonname);
318 	}
319 	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
320 }
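/*
 * Callers typically use the returned value to decide whether an I/O error
 * should still be surfaced or can be swallowed once the forced unmount is
 * under way, e.g. the pattern used elsewhere in this file:
 *
 *	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
 *		return (error);
 */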
321 
322 /*
323  * Wrapper used during ENXIO cleanup to allocate empty buffers when
324  * the kernel is unable to read the real one. They are needed so that
325  * the soft updates code can use them to unwind its dependencies.
326  */
327 int
328 ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
329     daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
330     struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
331     struct buf **bpp)
332 {
333 	int error;
334 
335 	flags |= GB_CVTENXIO;
336 	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
337 	    cred, flags, ckhashfunc, bpp);
338 	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
339 		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
340 		KASSERT(error == 0, ("getblkx failed"));
341 		vfs_bio_bzero_buf(*bpp, 0, size);
342 	}
343 	return (error);
344 }
345 
346 static int
347 ffs_mount(struct mount *mp)
348 {
349 	struct vnode *devvp, *odevvp;
350 	struct thread *td;
351 	struct ufsmount *ump = NULL;
352 	struct fs *fs;
353 	int error, flags;
354 	int error1 __diagused;
355 	uint64_t mntorflags, saved_mnt_flag;
356 	accmode_t accmode;
357 	struct nameidata ndp;
358 	char *fspec;
359 	bool mounted_softdep;
360 
361 	td = curthread;
362 	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
363 		return (EINVAL);
364 	if (uma_inode == NULL) {
365 		uma_inode = uma_zcreate("FFS inode",
366 		    sizeof(struct inode), NULL, NULL, NULL, NULL,
367 		    UMA_ALIGN_PTR, 0);
368 		uma_ufs1 = uma_zcreate("FFS1 dinode",
369 		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
370 		    UMA_ALIGN_PTR, 0);
371 		uma_ufs2 = uma_zcreate("FFS2 dinode",
372 		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
373 		    UMA_ALIGN_PTR, 0);
374 		VFS_SMR_ZONE_SET(uma_inode);
375 	}
376 
377 	vfs_deleteopt(mp->mnt_optnew, "groupquota");
378 	vfs_deleteopt(mp->mnt_optnew, "userquota");
379 
380 	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
381 	if (error)
382 		return (error);
383 
384 	mntorflags = 0;
385 	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
386 		mntorflags |= MNT_UNTRUSTED;
387 
388 	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
389 		mntorflags |= MNT_ACLS;
390 
391 	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
392 		mntorflags |= MNT_SNAPSHOT;
393 		/*
394 		 * Once we have set the MNT_SNAPSHOT flag, do not
395 		 * persist "snapshot" in the options list.
396 		 */
397 		vfs_deleteopt(mp->mnt_optnew, "snapshot");
398 		vfs_deleteopt(mp->mnt_opt, "snapshot");
399 	}
400 
401 	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
402 		if (mntorflags & MNT_ACLS) {
403 			vfs_mount_error(mp,
404 			    "\"acls\" and \"nfsv4acls\" options "
405 			    "are mutually exclusive");
406 			return (EINVAL);
407 		}
408 		mntorflags |= MNT_NFS4ACLS;
409 	}
410 
411 	MNT_ILOCK(mp);
412 	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
413 	mp->mnt_flag |= mntorflags;
414 	MNT_IUNLOCK(mp);
415 
416 	/*
417 	 * If this is a snapshot request, take the snapshot.
418 	 */
419 	if (mp->mnt_flag & MNT_SNAPSHOT) {
420 		if ((mp->mnt_flag & MNT_UPDATE) == 0)
421 			return (EINVAL);
422 		return (ffs_snapshot(mp, fspec));
423 	}
424 
425 	/*
426 	 * Must not call namei() while owning busy ref.
427 	 */
428 	if (mp->mnt_flag & MNT_UPDATE)
429 		vfs_unbusy(mp);
430 
431 	/*
432 	 * Not an update, or updating the name: look up the name
433 	 * and verify that it refers to a sensible disk device.
434 	 */
435 	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
436 	error = namei(&ndp);
437 	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
438 		/*
439 		 * Unmount does not start if MNT_UPDATE is set.  Mount
440 		 * update busies mp before setting MNT_UPDATE.  We
441 		 * must be able to retain our busy ref successfully,
442 		 * without sleep.
443 		 */
444 		error1 = vfs_busy(mp, MBF_NOWAIT);
445 		MPASS(error1 == 0);
446 	}
447 	if (error != 0)
448 		return (error);
449 	NDFREE_PNBUF(&ndp);
450 	if (!vn_isdisk_error(ndp.ni_vp, &error)) {
451 		vput(ndp.ni_vp);
452 		return (error);
453 	}
454 
455 	/*
456 	 * If mount by non-root, then verify that user has necessary
457 	 * permissions on the device.
458 	 */
459 	accmode = VREAD;
460 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
461 		accmode |= VWRITE;
462 	error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
463 	if (error)
464 		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
465 	if (error) {
466 		vput(ndp.ni_vp);
467 		return (error);
468 	}
469 
470 	/*
471 	 * New mount
472 	 *
473 	 * We need the name for the mount point (also used for
474 	 * "last mounted on") copied in. If an error occurs,
475 	 * the mount point is discarded by the upper level code.
476 	 * Note that vfs_mount_alloc() populates f_mntonname for us.
477 	 */
478 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
479 		if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
480 			vrele(ndp.ni_vp);
481 			return (error);
482 		}
483 	} else {
484 		/*
485 		 * When updating, check whether changing from read-only to
486 		 * read/write; if there is no device name, that's all we do.
487 		 */
488 		ump = VFSTOUFS(mp);
489 		fs = ump->um_fs;
490 		odevvp = ump->um_odevvp;
491 		devvp = ump->um_devvp;
492 
493 		/*
494 		 * If it's not the same vnode, or at least the same device
495 		 * If it's not the same vnode, or at least the same device,
496 		 */
497 		if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
498 			error = EINVAL; /* needs translation */
499 		vput(ndp.ni_vp);
500 		if (error)
501 			return (error);
502 		if (fs->fs_ronly == 0 &&
503 		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
504 			/*
505 			 * Flush any dirty data and suspend filesystem.
506 			 */
507 			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
508 				return (error);
509 			error = vfs_write_suspend_umnt(mp);
510 			if (error != 0)
511 				return (error);
512 
513 			fs->fs_ronly = 1;
514 			if (MOUNTEDSOFTDEP(mp)) {
515 				MNT_ILOCK(mp);
516 				mp->mnt_flag &= ~MNT_SOFTDEP;
517 				MNT_IUNLOCK(mp);
518 				mounted_softdep = true;
519 			} else
520 				mounted_softdep = false;
521 
522 			/*
523 			 * Check for and optionally get rid of files open
524 			 * for writing.
525 			 */
526 			flags = WRITECLOSE;
527 			if (mp->mnt_flag & MNT_FORCE)
528 				flags |= FORCECLOSE;
529 			if (mounted_softdep) {
530 				error = softdep_flushfiles(mp, flags, td);
531 			} else {
532 				error = ffs_flushfiles(mp, flags, td);
533 			}
534 			if (error) {
535 				fs->fs_ronly = 0;
536 				if (mounted_softdep) {
537 					MNT_ILOCK(mp);
538 					mp->mnt_flag |= MNT_SOFTDEP;
539 					MNT_IUNLOCK(mp);
540 				}
541 				vfs_write_resume(mp, 0);
542 				return (error);
543 			}
544 
545 			if (fs->fs_pendingblocks != 0 ||
546 			    fs->fs_pendinginodes != 0) {
547 				printf("WARNING: %s Update error: blocks %jd "
548 				    "files %d\n", fs->fs_fsmnt,
549 				    (intmax_t)fs->fs_pendingblocks,
550 				    fs->fs_pendinginodes);
551 				fs->fs_pendingblocks = 0;
552 				fs->fs_pendinginodes = 0;
553 			}
554 			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
555 				fs->fs_clean = 1;
556 			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
557 				fs->fs_ronly = 0;
558 				fs->fs_clean = 0;
559 				if (mounted_softdep) {
560 					MNT_ILOCK(mp);
561 					mp->mnt_flag |= MNT_SOFTDEP;
562 					MNT_IUNLOCK(mp);
563 				}
564 				vfs_write_resume(mp, 0);
565 				return (error);
566 			}
567 			if (mounted_softdep)
568 				softdep_unmount(mp);
569 			g_topology_lock();
570 			/*
571 			 * Drop our write and exclusive access.
572 			 */
573 			g_access(ump->um_cp, 0, -1, -1);
574 			g_topology_unlock();
575 			MNT_ILOCK(mp);
576 			mp->mnt_flag |= MNT_RDONLY;
577 			MNT_IUNLOCK(mp);
578 			/*
579 			 * Allow the writers to note that filesystem
580 			 * Allow the writers to note that the filesystem
581 			 * is read-only now.
582 			vfs_write_resume(mp, 0);
583 		}
584 		if ((mp->mnt_flag & MNT_RELOAD) &&
585 		    (error = ffs_reload(mp, 0)) != 0) {
586 			return (error);
587 		} else {
588 			/* ffs_reload replaces the superblock structure */
589 			fs = ump->um_fs;
590 		}
591 		if (fs->fs_ronly &&
592 		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
593 			/*
594 			 * If upgrade to read-write by non-root, then verify
595 			 * that user has necessary permissions on the device.
596 			 */
597 			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
598 			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
599 			    td->td_ucred, td);
600 			if (error)
601 				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
602 			VOP_UNLOCK(odevvp);
603 			if (error) {
604 				return (error);
605 			}
606 			fs->fs_flags &= ~FS_UNCLEAN;
607 			if (fs->fs_clean == 0) {
608 				fs->fs_flags |= FS_UNCLEAN;
609 				if ((mp->mnt_flag & MNT_FORCE) ||
610 				    ((fs->fs_flags &
611 				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
612 				     (fs->fs_flags & FS_DOSOFTDEP))) {
613 					printf("WARNING: %s was not properly "
614 					   "dismounted\n",
615 					   mp->mnt_stat.f_mntonname);
616 				} else {
617 					vfs_mount_error(mp,
618 					   "R/W mount of %s denied. %s.%s",
619 					   mp->mnt_stat.f_mntonname,
620 					   "Filesystem is not clean - run fsck",
621 					   (fs->fs_flags & FS_SUJ) == 0 ? "" :
622 					   " Forced mount will invalidate"
623 					   " journal contents");
624 					return (EPERM);
625 				}
626 			}
627 			g_topology_lock();
628 			/*
629 			 * Request exclusive write access.
630 			 */
631 			error = g_access(ump->um_cp, 0, 1, 1);
632 			g_topology_unlock();
633 			if (error)
634 				return (error);
635 			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
636 				return (error);
637 			error = vfs_write_suspend_umnt(mp);
638 			if (error != 0)
639 				return (error);
640 			fs->fs_ronly = 0;
641 			MNT_ILOCK(mp);
642 			saved_mnt_flag = MNT_RDONLY;
643 			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
644 			    MNT_ASYNC) != 0)
645 				saved_mnt_flag |= MNT_ASYNC;
646 			mp->mnt_flag &= ~saved_mnt_flag;
647 			MNT_IUNLOCK(mp);
648 			fs->fs_mtime = time_second;
649 			/* check to see if we need to start softdep */
650 			if ((fs->fs_flags & FS_DOSOFTDEP) &&
651 			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
652 				fs->fs_ronly = 1;
653 				MNT_ILOCK(mp);
654 				mp->mnt_flag |= saved_mnt_flag;
655 				MNT_IUNLOCK(mp);
656 				vfs_write_resume(mp, 0);
657 				return (error);
658 			}
659 			fs->fs_clean = 0;
660 			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
661 				fs->fs_ronly = 1;
662 				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
663 					softdep_unmount(mp);
664 				MNT_ILOCK(mp);
665 				mp->mnt_flag |= saved_mnt_flag;
666 				MNT_IUNLOCK(mp);
667 				vfs_write_resume(mp, 0);
668 				return (error);
669 			}
670 			if (fs->fs_snapinum[0] != 0)
671 				ffs_snapshot_mount(mp);
672 			vfs_write_resume(mp, 0);
673 		}
674 		/*
675 		 * Soft updates is incompatible with "async",
676 		 * so if we are doing softupdates stop the user
677 		 * from setting the async flag in an update.
678 		 * Softdep_mount() clears it in an initial mount
679 		 * or ro->rw remount.
680 		 */
681 		if (MOUNTEDSOFTDEP(mp)) {
682 			/* XXX: Reset too late ? */
683 			MNT_ILOCK(mp);
684 			mp->mnt_flag &= ~MNT_ASYNC;
685 			MNT_IUNLOCK(mp);
686 		}
687 		/*
688 		 * Keep MNT_ACLS flag if it is stored in superblock.
689 		 */
690 		if ((fs->fs_flags & FS_ACLS) != 0) {
691 			/* XXX: Set too late ? */
692 			MNT_ILOCK(mp);
693 			mp->mnt_flag |= MNT_ACLS;
694 			MNT_IUNLOCK(mp);
695 		}
696 
697 		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
698 			/* XXX: Set too late ? */
699 			MNT_ILOCK(mp);
700 			mp->mnt_flag |= MNT_NFS4ACLS;
701 			MNT_IUNLOCK(mp);
702 		}
703 
704 	}
705 
706 	MNT_ILOCK(mp);
707 	/*
708 	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
709 	 */
710 	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
711 		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
712 	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
713 		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
714 	MNT_IUNLOCK(mp);
715 
716 	vfs_mountedfrom(mp, fspec);
717 	return (0);
718 }
719 
720 /*
721  * Compatibility with old mount system call.
722  */
723 
724 static int
725 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
726 {
727 	struct ufs_args args;
728 	int error;
729 
730 	if (data == NULL)
731 		return (EINVAL);
732 	error = copyin(data, &args, sizeof args);
733 	if (error)
734 		return (error);
735 
736 	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
737 	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
738 	error = kernel_mount(ma, flags);
739 
740 	return (error);
741 }
742 
743 /*
744  * Reload all incore data for a filesystem (used after running fsck on
745  * the root filesystem and finding things to fix). If the 'force' flag
746  * is 0, the filesystem must be mounted read-only.
747  *
748  * Things to do to update the mount:
749  *	1) invalidate all cached meta-data.
750  *	2) re-read superblock from disk.
751  *	3) If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
752  *	   to allow secondary writers.
753  *	4) invalidate all cached file data.
754  *	5) re-read inode data for all active vnodes.
755  */
756 int
757 ffs_reload(struct mount *mp, int flags)
758 {
759 	struct vnode *vp, *mvp, *devvp;
760 	struct inode *ip;
761 	struct buf *bp;
762 	struct fs *fs, *newfs;
763 	struct ufsmount *ump;
764 	int error;
765 
766 	ump = VFSTOUFS(mp);
767 
768 	MNT_ILOCK(mp);
769 	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
770 		MNT_IUNLOCK(mp);
771 		return (EINVAL);
772 	}
773 	MNT_IUNLOCK(mp);
774 
775 	/*
776 	 * Step 1: invalidate all cached meta-data.
777 	 */
778 	devvp = VFSTOUFS(mp)->um_devvp;
779 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
780 	if (vinvalbuf(devvp, 0, 0, 0) != 0)
781 		panic("ffs_reload: dirty1");
782 	VOP_UNLOCK(devvp);
783 
784 	/*
785 	 * Step 2: re-read superblock from disk.
786 	 */
787 	if ((error = ffs_sbget(devvp, &newfs, UFS_STDSB, 0, M_UFSMNT,
788 	    ffs_use_bread)) != 0)
789 		return (error);
790 	/*
791 	 * Replace our superblock with the new superblock. Preserve
792 	 * our read-only status.
793 	 */
794 	fs = VFSTOUFS(mp)->um_fs;
795 	newfs->fs_ronly = fs->fs_ronly;
796 	free(fs->fs_csp, M_UFSMNT);
797 	free(fs->fs_si, M_UFSMNT);
798 	free(fs, M_UFSMNT);
799 	fs = VFSTOUFS(mp)->um_fs = newfs;
800 	ump->um_bsize = fs->fs_bsize;
801 	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
802 	UFS_LOCK(ump);
803 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
804 		printf("WARNING: %s: reload pending error: blocks %jd "
805 		    "files %d\n", mp->mnt_stat.f_mntonname,
806 		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
807 		fs->fs_pendingblocks = 0;
808 		fs->fs_pendinginodes = 0;
809 	}
810 	UFS_UNLOCK(ump);
811 	/*
812 	 * Step 3: If requested, clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags
813 	 * to allow secondary writers.
814 	 */
815 	if ((flags & FFSR_UNSUSPEND) != 0) {
816 		MNT_ILOCK(mp);
817 		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
818 		wakeup(&mp->mnt_flag);
819 		MNT_IUNLOCK(mp);
820 	}
821 
822 loop:
823 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
824 		/*
825 		 * Skip syncer vnode.
826 		 */
827 		if (vp->v_type == VNON) {
828 			VI_UNLOCK(vp);
829 			continue;
830 		}
831 		/*
832 		 * Step 4: invalidate all cached file data.
833 		 */
834 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
835 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
836 			goto loop;
837 		}
838 		if (vinvalbuf(vp, 0, 0, 0))
839 			panic("ffs_reload: dirty2");
840 		/*
841 		 * Step 5: re-read inode data for all active vnodes.
842 		 */
843 		ip = VTOI(vp);
844 		error =
845 		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
846 		    (int)fs->fs_bsize, NOCRED, &bp);
847 		if (error) {
848 			vput(vp);
849 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
850 			return (error);
851 		}
852 		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
853 			brelse(bp);
854 			vput(vp);
855 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
856 			return (error);
857 		}
858 		ip->i_effnlink = ip->i_nlink;
859 		brelse(bp);
860 		vput(vp);
861 	}
862 	return (0);
863 }
864 
865 /*
866  * Common code for mount and mountroot
867  */
868 static int
869 ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
870 {
871 	struct ufsmount *ump;
872 	struct fs *fs;
873 	struct cdev *dev;
874 	int error, i, len, ronly;
875 	struct ucred *cred;
876 	struct g_consumer *cp;
877 	struct mount *nmp;
878 	struct vnode *devvp;
879 	int candelete, canspeedup;
880 
881 	fs = NULL;
882 	ump = NULL;
883 	cred = td ? td->td_ucred : NOCRED;
884 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
885 
886 	devvp = mntfs_allocvp(mp, odevvp);
887 	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
888 	dev = devvp->v_rdev;
889 	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
890 	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
891 	    (uintptr_t)mp) == 0) {
892 		mntfs_freevp(devvp);
893 		return (EBUSY);
894 	}
895 	g_topology_lock();
896 	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
897 	g_topology_unlock();
898 	if (error != 0) {
899 		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
900 		mntfs_freevp(devvp);
901 		return (error);
902 	}
903 	dev_ref(dev);
904 	devvp->v_bufobj.bo_ops = &ffs_ops;
905 	BO_LOCK(&odevvp->v_bufobj);
906 	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
907 	BO_UNLOCK(&odevvp->v_bufobj);
908 	VOP_UNLOCK(devvp);
909 	if (dev->si_iosize_max != 0)
910 		mp->mnt_iosize_max = dev->si_iosize_max;
911 	if (mp->mnt_iosize_max > maxphys)
912 		mp->mnt_iosize_max = maxphys;
913 	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
914 		error = EINVAL;
915 		vfs_mount_error(mp,
916 		    "Invalid sectorsize %d for superblock size %d",
917 		    cp->provider->sectorsize, SBLOCKSIZE);
918 		goto out;
919 	}
920 	/* fetch the superblock and summary information */
921 	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
922 		error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
923 	else
924 		error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
925 		    ffs_use_bread);
926 	if (error != 0)
927 		goto out;
928 	fs->fs_flags &= ~FS_UNCLEAN;
929 	if (fs->fs_clean == 0) {
930 		fs->fs_flags |= FS_UNCLEAN;
931 		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
932 		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
933 		     (fs->fs_flags & FS_DOSOFTDEP))) {
934 			printf("WARNING: %s was not properly dismounted\n",
935 			    mp->mnt_stat.f_mntonname);
936 		} else {
937 			vfs_mount_error(mp, "R/W mount on %s denied. "
938 			    "Filesystem is not clean - run fsck.%s",
939 			    mp->mnt_stat.f_mntonname,
940 			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
941 			    " Forced mount will invalidate journal contents");
942 			error = EPERM;
943 			goto out;
944 		}
945 		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
946 		    (mp->mnt_flag & MNT_FORCE)) {
947 			printf("WARNING: %s: lost blocks %jd files %d\n",
948 			    mp->mnt_stat.f_mntonname,
949 			    (intmax_t)fs->fs_pendingblocks,
950 			    fs->fs_pendinginodes);
951 			fs->fs_pendingblocks = 0;
952 			fs->fs_pendinginodes = 0;
953 		}
954 	}
955 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
956 		printf("WARNING: %s: mount pending error: blocks %jd "
957 		    "files %d\n", mp->mnt_stat.f_mntonname,
958 		    (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
959 		fs->fs_pendingblocks = 0;
960 		fs->fs_pendinginodes = 0;
961 	}
962 	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
963 #ifdef UFS_GJOURNAL
964 		/*
965 		 * Get journal provider name.
966 		 */
967 		len = 1024;
968 		mp->mnt_gjprovider = malloc((uint64_t)len, M_UFSMNT, M_WAITOK);
969 		if (g_io_getattr("GJOURNAL::provider", cp, &len,
970 		    mp->mnt_gjprovider) == 0) {
971 			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
972 			    M_UFSMNT, M_WAITOK);
973 			MNT_ILOCK(mp);
974 			mp->mnt_flag |= MNT_GJOURNAL;
975 			MNT_IUNLOCK(mp);
976 		} else {
977 			if ((mp->mnt_flag & MNT_RDONLY) == 0)
978 				printf("WARNING: %s: GJOURNAL flag on fs "
979 				    "but no gjournal provider below\n",
980 				    mp->mnt_stat.f_mntonname);
981 			free(mp->mnt_gjprovider, M_UFSMNT);
982 			mp->mnt_gjprovider = NULL;
983 		}
984 #else
985 		printf("WARNING: %s: GJOURNAL flag on fs but no "
986 		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
987 #endif
988 	} else {
989 		mp->mnt_gjprovider = NULL;
990 	}
991 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
992 	ump->um_cp = cp;
993 	ump->um_bo = &devvp->v_bufobj;
994 	ump->um_fs = fs;
995 	if (fs->fs_magic == FS_UFS1_MAGIC) {
996 		ump->um_fstype = UFS1;
997 		ump->um_balloc = ffs_balloc_ufs1;
998 	} else {
999 		ump->um_fstype = UFS2;
1000 		ump->um_balloc = ffs_balloc_ufs2;
1001 	}
1002 	ump->um_blkatoff = ffs_blkatoff;
1003 	ump->um_truncate = ffs_truncate;
1004 	ump->um_update = ffs_update;
1005 	ump->um_valloc = ffs_valloc;
1006 	ump->um_vfree = ffs_vfree;
1007 	ump->um_ifree = ffs_ifree;
1008 	ump->um_rdonly = ffs_rdonly;
1009 	ump->um_snapgone = ffs_snapgone;
1010 	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
1011 		ump->um_check_blkno = ffs_check_blkno;
1012 	else
1013 		ump->um_check_blkno = NULL;
1014 	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
1015 	sx_init(&ump->um_checkpath_lock, "uchpth");
1016 	fs->fs_ronly = ronly;
1017 	fs->fs_active = NULL;
1018 	mp->mnt_data = ump;
1019 	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
1020 	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
1021 	nmp = NULL;
1022 	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
1023 	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
1024 		if (nmp)
1025 			vfs_rel(nmp);
1026 		vfs_getnewfsid(mp);
1027 	}
1028 	ump->um_bsize = fs->fs_bsize;
1029 	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1030 	MNT_ILOCK(mp);
1031 	mp->mnt_flag |= MNT_LOCAL;
1032 	MNT_IUNLOCK(mp);
1033 	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
1034 #ifdef MAC
1035 		MNT_ILOCK(mp);
1036 		mp->mnt_flag |= MNT_MULTILABEL;
1037 		MNT_IUNLOCK(mp);
1038 #else
1039 		printf("WARNING: %s: multilabel flag on fs but "
1040 		    "no MAC support\n", mp->mnt_stat.f_mntonname);
1041 #endif
1042 	}
1043 	if ((fs->fs_flags & FS_ACLS) != 0) {
1044 #ifdef UFS_ACL
1045 		MNT_ILOCK(mp);
1046 
1047 		if (mp->mnt_flag & MNT_NFS4ACLS)
1048 			printf("WARNING: %s: ACLs flag on fs conflicts with "
1049 			    "\"nfsv4acls\" mount option; option ignored\n",
1050 			    mp->mnt_stat.f_mntonname);
1051 		mp->mnt_flag &= ~MNT_NFS4ACLS;
1052 		mp->mnt_flag |= MNT_ACLS;
1053 
1054 		MNT_IUNLOCK(mp);
1055 #else
1056 		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
1057 		    mp->mnt_stat.f_mntonname);
1058 #endif
1059 	}
1060 	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
1061 #ifdef UFS_ACL
1062 		MNT_ILOCK(mp);
1063 
1064 		if (mp->mnt_flag & MNT_ACLS)
1065 			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
1066 			    "with \"acls\" mount option; option ignored\n",
1067 			    mp->mnt_stat.f_mntonname);
1068 		mp->mnt_flag &= ~MNT_ACLS;
1069 		mp->mnt_flag |= MNT_NFS4ACLS;
1070 
1071 		MNT_IUNLOCK(mp);
1072 #else
1073 		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
1074 		    "ACLs support\n", mp->mnt_stat.f_mntonname);
1075 #endif
1076 	}
1077 	if ((fs->fs_flags & FS_TRIM) != 0) {
1078 		len = sizeof(int);
1079 		if (g_io_getattr("GEOM::candelete", cp, &len,
1080 		    &candelete) == 0) {
1081 			if (candelete)
1082 				ump->um_flags |= UM_CANDELETE;
1083 			else
1084 				printf("WARNING: %s: TRIM flag on fs but disk "
1085 				    "does not support TRIM\n",
1086 				    mp->mnt_stat.f_mntonname);
1087 		} else {
1088 			printf("WARNING: %s: TRIM flag on fs but disk does "
1089 			    "not confirm that it supports TRIM\n",
1090 			    mp->mnt_stat.f_mntonname);
1091 		}
1092 		if (((ump->um_flags) & UM_CANDELETE) != 0) {
1093 			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
1094 			    taskqueue_thread_enqueue, &ump->um_trim_tq);
1095 			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
1096 			    "%s trim", mp->mnt_stat.f_mntonname);
1097 			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
1098 			    &ump->um_trimlisthashsize);
1099 		}
1100 	}
1101 
1102 	len = sizeof(int);
1103 	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
1104 		if (canspeedup)
1105 			ump->um_flags |= UM_CANSPEEDUP;
1106 	}
1107 
1108 	ump->um_mountp = mp;
1109 	ump->um_dev = dev;
1110 	ump->um_devvp = devvp;
1111 	ump->um_odevvp = odevvp;
1112 	ump->um_nindir = fs->fs_nindir;
1113 	ump->um_bptrtodb = fs->fs_fsbtodb;
1114 	ump->um_seqinc = fs->fs_frag;
1115 	for (i = 0; i < MAXQUOTAS; i++)
1116 		ump->um_quotas[i] = NULLVP;
1117 #ifdef UFS_EXTATTR
1118 	ufs_extattr_uepm_init(&ump->um_extattr);
1119 #endif
1120 	/*
1121 	 * Set FS local "last mounted on" information (NULL pad)
1122 	 */
1123 	bzero(fs->fs_fsmnt, MAXMNTLEN);
1124 	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
1125 	mp->mnt_stat.f_iosize = fs->fs_bsize;
1126 
1127 	if (mp->mnt_flag & MNT_ROOTFS) {
1128 		/*
1129 		 * Root mount; update timestamp in mount structure.
1130 		 * this will be used by the common root mount code
1131 		 * to update the system clock.
1132 		 */
1133 		mp->mnt_time = fs->fs_time;
1134 	}
1135 
1136 	if (ronly == 0) {
1137 		fs->fs_mtime = time_second;
1138 		if ((fs->fs_flags & FS_DOSOFTDEP) &&
1139 		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
1140 			ffs_flushfiles(mp, FORCECLOSE, td);
1141 			goto out;
1142 		}
1143 		if (fs->fs_snapinum[0] != 0)
1144 			ffs_snapshot_mount(mp);
1145 		fs->fs_fmod = 1;
1146 		fs->fs_clean = 0;
1147 		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
1148 	}
1149 	/*
1150 	 * Initialize filesystem state information in mount struct.
1151 	 */
1152 	MNT_ILOCK(mp);
1153 	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
1154 	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
1155 	MNT_IUNLOCK(mp);
1156 #ifdef UFS_EXTATTR
1157 #ifdef UFS_EXTATTR_AUTOSTART
1158 	/*
1159 	 *
1160 	 * Auto-starting does the following:
1161 	 *	- check for /.attribute in the fs, and extattr_start if so
1162 	 *	- for each file in .attribute, enable that file with
1163 	 * 	  an attribute of the same name.
1164 	 * Not clear how to report errors -- probably eat them.
1165 	 * This would all happen while the filesystem was busy/not
1166 	 * available, so would effectively be "atomic".
1167 	 */
1168 	(void) ufs_extattr_autostart(mp, td);
1169 #endif /* !UFS_EXTATTR_AUTOSTART */
1170 #endif /* !UFS_EXTATTR */
1171 	return (0);
1172 out:
1173 	if (fs != NULL) {
1174 		free(fs->fs_csp, M_UFSMNT);
1175 		free(fs->fs_si, M_UFSMNT);
1176 		free(fs, M_UFSMNT);
1177 	}
1178 	if (cp != NULL) {
1179 		g_topology_lock();
1180 		g_vfs_close(cp);
1181 		g_topology_unlock();
1182 	}
1183 	if (ump != NULL) {
1184 		mtx_destroy(UFS_MTX(ump));
1185 		sx_destroy(&ump->um_checkpath_lock);
1186 		if (mp->mnt_gjprovider != NULL) {
1187 			free(mp->mnt_gjprovider, M_UFSMNT);
1188 			mp->mnt_gjprovider = NULL;
1189 		}
1190 		MPASS(ump->um_softdep == NULL);
1191 		free(ump, M_UFSMNT);
1192 		mp->mnt_data = NULL;
1193 	}
1194 	BO_LOCK(&odevvp->v_bufobj);
1195 	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
1196 	BO_UNLOCK(&odevvp->v_bufobj);
1197 	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
1198 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1199 	mntfs_freevp(devvp);
1200 	dev_rel(dev);
1201 	return (error);
1202 }
1203 
1204 /*
1205  * A read function for use by filesystem-layer routines.
1206  */
1207 static int
1208 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
1209 {
1210 	struct buf *bp;
1211 	int error;
1212 
1213 	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
1214 	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
1215 	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
1216 	    &bp)) != 0)
1217 		return (error);
1218 	bcopy(bp->b_data, *bufp, size);
1219 	bp->b_flags |= B_INVAL | B_NOCACHE;
1220 	brelse(bp);
1221 	return (0);
1222 }
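/*
 * Editorial note on the contract assumed above: as used in this file,
 * ffs_sbget()/ffs_sbsearch() invoke the read function with *bufp == NULL
 * (hence the KASSERT) and take ownership of the M_UFSMNT allocation made
 * here; it is eventually released via the free(..., M_UFSMNT) calls seen in
 * ffs_reload() and ffs_unmount().
 */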
1223 
1224 /*
1225  * unmount system call
1226  */
1227 static int
1228 ffs_unmount(struct mount *mp, int mntflags)
1229 {
1230 	struct thread *td;
1231 	struct ufsmount *ump = VFSTOUFS(mp);
1232 	struct fs *fs;
1233 	int error, flags, susp;
1234 #ifdef UFS_EXTATTR
1235 	int e_restart;
1236 #endif
1237 
1238 	flags = 0;
1239 	td = curthread;
1240 	fs = ump->um_fs;
1241 	if (mntflags & MNT_FORCE)
1242 		flags |= FORCECLOSE;
1243 	susp = fs->fs_ronly == 0;
1244 #ifdef UFS_EXTATTR
1245 	if ((error = ufs_extattr_stop(mp, td))) {
1246 		if (error != EOPNOTSUPP)
1247 			printf("WARNING: unmount %s: ufs_extattr_stop "
1248 			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
1249 			    error);
1250 		e_restart = 0;
1251 	} else {
1252 		ufs_extattr_uepm_destroy(&ump->um_extattr);
1253 		e_restart = 1;
1254 	}
1255 #endif
1256 	if (susp) {
1257 		error = vfs_write_suspend_umnt(mp);
1258 		if (error != 0)
1259 			goto fail1;
1260 	}
1261 	if (MOUNTEDSOFTDEP(mp))
1262 		error = softdep_flushfiles(mp, flags, td);
1263 	else
1264 		error = ffs_flushfiles(mp, flags, td);
1265 	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
1266 		goto fail;
1267 
1268 	UFS_LOCK(ump);
1269 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1270 		printf("WARNING: unmount %s: pending error: blocks %jd "
1271 		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
1272 		    fs->fs_pendinginodes);
1273 		fs->fs_pendingblocks = 0;
1274 		fs->fs_pendinginodes = 0;
1275 	}
1276 	UFS_UNLOCK(ump);
1277 	if (MOUNTEDSOFTDEP(mp))
1278 		softdep_unmount(mp);
1279 	MPASS(ump->um_softdep == NULL);
1280 	if (fs->fs_ronly == 0) {
1281 		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
1282 		error = ffs_sbupdate(ump, MNT_WAIT, 0);
1283 		if (ffs_fsfail_cleanup(ump, error))
1284 			error = 0;
1285 		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
1286 			fs->fs_clean = 0;
1287 			goto fail;
1288 		}
1289 	}
1290 	if (susp)
1291 		vfs_write_resume(mp, VR_START_WRITE);
1292 	if (ump->um_trim_tq != NULL) {
1293 		MPASS(ump->um_trim_inflight == 0);
1294 		taskqueue_free(ump->um_trim_tq);
1295 		free (ump->um_trimhash, M_TRIM);
1296 	}
1297 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1298 	g_topology_lock();
1299 	g_vfs_close(ump->um_cp);
1300 	g_topology_unlock();
1301 	BO_LOCK(&ump->um_odevvp->v_bufobj);
1302 	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
1303 	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
1304 	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
1305 	mntfs_freevp(ump->um_devvp);
1306 	vrele(ump->um_odevvp);
1307 	dev_rel(ump->um_dev);
1308 	mtx_destroy(UFS_MTX(ump));
1309 	sx_destroy(&ump->um_checkpath_lock);
1310 	if (mp->mnt_gjprovider != NULL) {
1311 		free(mp->mnt_gjprovider, M_UFSMNT);
1312 		mp->mnt_gjprovider = NULL;
1313 	}
1314 	free(fs->fs_csp, M_UFSMNT);
1315 	free(fs->fs_si, M_UFSMNT);
1316 	free(fs, M_UFSMNT);
1317 	free(ump, M_UFSMNT);
1318 	mp->mnt_data = NULL;
1319 	if (td->td_su == mp) {
1320 		td->td_su = NULL;
1321 		vfs_rel(mp);
1322 	}
1323 	return (error);
1324 
1325 fail:
1326 	if (susp)
1327 		vfs_write_resume(mp, VR_START_WRITE);
1328 fail1:
1329 #ifdef UFS_EXTATTR
1330 	if (e_restart) {
1331 		ufs_extattr_uepm_init(&ump->um_extattr);
1332 #ifdef UFS_EXTATTR_AUTOSTART
1333 		(void) ufs_extattr_autostart(mp, td);
1334 #endif
1335 	}
1336 #endif
1337 
1338 	return (error);
1339 }
1340 
1341 /*
1342  * Flush out all the files in a filesystem.
1343  */
1344 int
1345 ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
1346 {
1347 	struct ufsmount *ump;
1348 	int qerror, error;
1349 
1350 	ump = VFSTOUFS(mp);
1351 	qerror = 0;
1352 #ifdef QUOTA
1353 	if (mp->mnt_flag & MNT_QUOTA) {
1354 		int i;
1355 		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
1356 		if (error)
1357 			return (error);
1358 		for (i = 0; i < MAXQUOTAS; i++) {
1359 			error = quotaoff(td, mp, i);
1360 			if (error != 0) {
1361 				if ((flags & EARLYFLUSH) == 0)
1362 					return (error);
1363 				else
1364 					qerror = error;
1365 			}
1366 		}
1367 
1368 		/*
1369 		 * Here we fall through to vflush again to ensure that
1370 		 * we have gotten rid of all the system vnodes, unless
1371 		 * quotas must not be closed.
1372 		 */
1373 	}
1374 #endif
1375 	/* devvp is not locked there */
1376 	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
1377 		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
1378 			return (error);
1379 		ffs_snapshot_unmount(mp);
1380 		flags |= FORCECLOSE;
1381 		/*
1382 		 * Here we fall through to vflush again to ensure
1383 		 * that we have gotten rid of all the system vnodes.
1384 		 */
1385 	}
1386 
1387 	/*
1388 	 * Do not close system files if quotas were not closed, to be
1389 	 * able to sync the remaining dquots.  The freeblks softupdate
1390 	 * workitems might hold a reference on a dquot, preventing
1391 	 * quotaoff() from completing.  Next round of
1392 	 * softdep_flushworklist() iteration should process the
1393 	 * blockers, allowing the next run of quotaoff() to finally
1394 	 * flush held dquots.
1395 	 *
1396 	 * Otherwise, flush all the files.
1397 	 */
1398 	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
1399 		return (error);
1400 
1401 	/*
1402 	 * If this is a forcible unmount and there were any files that
1403 	 * were unlinked but still open, then vflush() will have
1404 	 * truncated and freed those files, which might have started
1405 	 * some trim work.  Wait here for any trims to complete
1406 	 * and process the blkfrees which follow the trims.
1407 	 * This may create more dirty devvp buffers and softdep deps.
1408 	 */
1409 	if (ump->um_trim_tq != NULL) {
1410 		while (ump->um_trim_inflight != 0)
1411 			pause("ufsutr", hz);
1412 		taskqueue_drain_all(ump->um_trim_tq);
1413 	}
1414 
1415 	/*
1416 	 * Flush filesystem metadata.
1417 	 */
1418 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1419 	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
1420 	VOP_UNLOCK(ump->um_devvp);
1421 	return (error);
1422 }
1423 
1424 /*
1425  * Get filesystem statistics.
1426  */
1427 static int
1428 ffs_statfs(struct mount *mp, struct statfs *sbp)
1429 {
1430 	struct ufsmount *ump;
1431 	struct fs *fs;
1432 
1433 	ump = VFSTOUFS(mp);
1434 	fs = ump->um_fs;
1435 	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
1436 		panic("ffs_statfs");
1437 	sbp->f_version = STATFS_VERSION;
1438 	sbp->f_bsize = fs->fs_fsize;
1439 	sbp->f_iosize = fs->fs_bsize;
1440 	sbp->f_blocks = fs->fs_dsize;
1441 	UFS_LOCK(ump);
1442 	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
1443 	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1444 	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
1445 	    dbtofsb(fs, fs->fs_pendingblocks);
1446 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1447 	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1448 	UFS_UNLOCK(ump);
1449 	sbp->f_namemax = UFS_MAXNAMLEN;
1450 	return (0);
1451 }
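/*
 * Editorial note: because f_bavail is computed as
 * freespace(fs, fs->fs_minfree) plus pending (to-be-released) blocks, it
 * excludes the minfree reserve and can legitimately go negative once that
 * reserve is being consumed; df(1) then reports negative "Avail" space for
 * the filesystem.
 */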
1452 
1453 static bool
1454 sync_doupdate(struct inode *ip)
1455 {
1456 
1457 	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
1458 	    IN_UPDATE)) != 0);
1459 }
1460 
1461 static int
1462 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
1463 {
1464 	struct inode *ip;
1465 
1466 	/*
1467 	 * Flags are safe to access because ->v_data invalidation
1468 	 * is held off by listmtx.
1469 	 */
1470 	if (vp->v_type == VNON)
1471 		return (false);
1472 	ip = VTOI(vp);
1473 	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
1474 		return (false);
1475 	return (true);
1476 }
1477 
1478 /*
1479  * For a lazy sync, we only care about access times, quotas and the
1480  * superblock.  Other filesystem changes are already converted to
1481  * cylinder group blocks or inode blocks updates and are written to
1482  * disk by syncer.
1483  */
1484 static int
1485 ffs_sync_lazy(struct mount *mp)
1486 {
1487 	struct vnode *mvp, *vp;
1488 	struct inode *ip;
1489 	int allerror, error;
1490 
1491 	allerror = 0;
1492 	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
1493 #ifdef QUOTA
1494 		qsync(mp);
1495 #endif
1496 		goto sbupdate;
1497 	}
1498 	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
1499 		if (vp->v_type == VNON) {
1500 			VI_UNLOCK(vp);
1501 			continue;
1502 		}
1503 		ip = VTOI(vp);
1504 
1505 		/*
1506 		 * The IN_ACCESS flag is converted to IN_MODIFIED by
1507 		 * ufs_close() and ufs_getattr() by the calls to
1508 		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
1509 		 * Test also all the other timestamp flags too, to pick up
1510 		 * any other cases that could be missed.
1511 		 */
1512 		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
1513 			VI_UNLOCK(vp);
1514 			continue;
1515 		}
1516 		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
1517 			continue;
1518 #ifdef QUOTA
1519 		qsyncvp(vp);
1520 #endif
1521 		if (sync_doupdate(ip))
1522 			error = ffs_update(vp, 0);
1523 		if (error != 0)
1524 			allerror = error;
1525 		vput(vp);
1526 	}
1527 sbupdate:
1528 	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
1529 	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
1530 		allerror = error;
1531 	return (allerror);
1532 }
1533 
1534 /*
1535  * Go through the disk queues to initiate sandbagged IO;
1536  * go through the inodes to write those that have been modified;
1537  * initiate the writing of the super block if it has been modified.
1538  *
1539  * Note: we are always called with the filesystem marked busy using
1540  * vfs_busy().
1541  */
1542 static int
1543 ffs_sync(struct mount *mp, int waitfor)
1544 {
1545 	struct vnode *mvp, *vp, *devvp;
1546 	struct thread *td;
1547 	struct inode *ip;
1548 	struct ufsmount *ump = VFSTOUFS(mp);
1549 	struct fs *fs;
1550 	int error, count, lockreq, allerror = 0;
1551 	int suspend;
1552 	int suspended;
1553 	int secondary_writes;
1554 	int secondary_accwrites;
1555 	int softdep_deps;
1556 	int softdep_accdeps;
1557 	struct bufobj *bo;
1558 
1559 	suspend = 0;
1560 	suspended = 0;
1561 	td = curthread;
1562 	fs = ump->um_fs;
1563 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
1564 		panic("%s: ffs_sync: modification on read-only filesystem",
1565 		    fs->fs_fsmnt);
1566 	if (waitfor == MNT_LAZY) {
1567 		if (!rebooting)
1568 			return (ffs_sync_lazy(mp));
1569 		waitfor = MNT_NOWAIT;
1570 	}
1571 
1572 	/*
1573 	 * Write back each (modified) inode.
1574 	 */
1575 	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
1576 	if (waitfor == MNT_SUSPEND) {
1577 		suspend = 1;
1578 		waitfor = MNT_WAIT;
1579 	}
1580 	if (waitfor == MNT_WAIT)
1581 		lockreq = LK_EXCLUSIVE;
1582 	lockreq |= LK_INTERLOCK;
1583 loop:
1584 	/* Grab snapshot of secondary write counts */
1585 	MNT_ILOCK(mp);
1586 	secondary_writes = mp->mnt_secondary_writes;
1587 	secondary_accwrites = mp->mnt_secondary_accwrites;
1588 	MNT_IUNLOCK(mp);
1589 
1590 	/* Grab snapshot of softdep dependency counts */
1591 	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
1592 
1593 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1594 		/*
1595 		 * Depend on the vnode interlock to keep things stable enough
1596 		 * for a quick test.  Since there might be hundreds of
1597 		 * thousands of vnodes, we cannot afford even a subroutine
1598 		 * call unless there's a good chance that we have work to do.
1599 		 */
1600 		if (vp->v_type == VNON) {
1601 			VI_UNLOCK(vp);
1602 			continue;
1603 		}
1604 		ip = VTOI(vp);
1605 		if ((ip->i_flag &
1606 		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1607 		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1608 			VI_UNLOCK(vp);
1609 			continue;
1610 		}
1611 		if ((error = vget(vp, lockreq)) != 0) {
1612 			if (error == ENOENT) {
1613 				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1614 				goto loop;
1615 			}
1616 			continue;
1617 		}
1618 #ifdef QUOTA
1619 		qsyncvp(vp);
1620 #endif
1621 		for (;;) {
1622 			error = ffs_syncvnode(vp, waitfor, 0);
1623 			if (error == ERELOOKUP)
1624 				continue;
1625 			if (error != 0)
1626 				allerror = error;
1627 			break;
1628 		}
1629 		vput(vp);
1630 	}
1631 	/*
1632 	 * Force stale filesystem control information to be flushed.
1633 	 */
1634 	if (waitfor == MNT_WAIT || rebooting) {
1635 		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1636 			allerror = error;
1637 		if (ffs_fsfail_cleanup(ump, allerror))
1638 			allerror = 0;
1639 		/* Flushed work items may create new vnodes to clean */
1640 		if (allerror == 0 && count)
1641 			goto loop;
1642 	}
1643 
1644 	devvp = ump->um_devvp;
1645 	bo = &devvp->v_bufobj;
1646 	BO_LOCK(bo);
1647 	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1648 		BO_UNLOCK(bo);
1649 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1650 		error = VOP_FSYNC(devvp, waitfor, td);
1651 		VOP_UNLOCK(devvp);
1652 		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
1653 			error = ffs_sbupdate(ump, waitfor, 0);
1654 		if (error != 0)
1655 			allerror = error;
1656 		if (ffs_fsfail_cleanup(ump, allerror))
1657 			allerror = 0;
1658 		if (allerror == 0 && waitfor == MNT_WAIT)
1659 			goto loop;
1660 	} else if (suspend != 0) {
1661 		if (softdep_check_suspend(mp,
1662 					  devvp,
1663 					  softdep_deps,
1664 					  softdep_accdeps,
1665 					  secondary_writes,
1666 					  secondary_accwrites) != 0) {
1667 			MNT_IUNLOCK(mp);
1668 			goto loop;	/* More work needed */
1669 		}
1670 		mtx_assert(MNT_MTX(mp), MA_OWNED);
1671 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1672 		MNT_IUNLOCK(mp);
1673 		suspended = 1;
1674 	} else
1675 		BO_UNLOCK(bo);
1676 	/*
1677 	 * Write back modified superblock.
1678 	 */
1679 	if (fs->fs_fmod != 0 &&
1680 	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1681 		allerror = error;
1682 	if (ffs_fsfail_cleanup(ump, allerror))
1683 		allerror = 0;
1684 	return (allerror);
1685 }
1686 
1687 int
1688 ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
1689 {
1690 	return (ffs_vgetf(mp, ino, flags, vpp, 0));
1691 }
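/*
 * Editorial note: ffs_vget() is the VFS_VGET() entry point registered in
 * ufs_vfsops above; callers that need FFS-specific behaviour (e.g. the
 * FFSV_REPLACE or FFSV_FORCEINSMQ flags used below) go through ffs_vgetf()
 * directly.
 */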
1692 
1693 int
1694 ffs_vgetf(struct mount *mp,
1695 	ino_t ino,
1696 	int flags,
1697 	struct vnode **vpp,
1698 	int ffs_flags)
1699 {
1700 	struct fs *fs;
1701 	struct inode *ip;
1702 	struct ufsmount *ump;
1703 	struct buf *bp;
1704 	struct vnode *vp;
1705 	daddr_t dbn;
1706 	int error;
1707 
1708 	MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1709 	    (flags & LK_EXCLUSIVE) != 0);
1710 
1711 	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1712 	if (error != 0)
1713 		return (error);
1714 	if (*vpp != NULL) {
1715 		if ((ffs_flags & FFSV_REPLACE) == 0 ||
1716 		    ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1717 		    !VN_IS_DOOMED(*vpp)))
1718 			return (0);
1719 		vgone(*vpp);
1720 		vput(*vpp);
1721 	}
1722 
1723 	/*
1724 	 * We must promote to an exclusive lock for vnode creation.  This
1725 	 * can happen if lookup is passed LOCKSHARED.
1726 	 */
1727 	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1728 		flags &= ~LK_TYPE_MASK;
1729 		flags |= LK_EXCLUSIVE;
1730 	}
1731 
1732 	/*
1733 	 * We do not lock vnode creation as it is believed to be too
1734 	 * expensive for such a rare case as simultaneous creation of a vnode
1735 	 * for the same ino by different processes. We just allow them to race
1736 	 * and check later to decide who wins. Let the race begin!
1737 	 */
1738 
1739 	ump = VFSTOUFS(mp);
1740 	fs = ump->um_fs;
1741 	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);
1742 
1743 	/* Allocate a new vnode/inode. */
1744 	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1745 	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1746 	if (error) {
1747 		*vpp = NULL;
1748 		uma_zfree_smr(uma_inode, ip);
1749 		return (error);
1750 	}
1751 	/*
1752 	 * FFS supports recursive locking.
1753 	 */
1754 	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1755 	VN_LOCK_AREC(vp);
1756 	vp->v_data = ip;
1757 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
1758 	ip->i_vnode = vp;
1759 	ip->i_ump = ump;
1760 	ip->i_number = ino;
1761 	ip->i_ea_refs = 0;
1762 	ip->i_nextclustercg = -1;
1763 	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1764 	ip->i_mode = 0; /* ensure error cases below throw away vnode */
1765 	cluster_init_vn(&ip->i_clusterw);
1766 #ifdef DIAGNOSTIC
1767 	ufs_init_trackers(ip);
1768 #endif
1769 #ifdef QUOTA
1770 	{
1771 		int i;
1772 		for (i = 0; i < MAXQUOTAS; i++)
1773 			ip->i_dquot[i] = NODQUOT;
1774 	}
1775 #endif
1776 
1777 	if (ffs_flags & FFSV_FORCEINSMQ)
1778 		vp->v_vflag |= VV_FORCEINSMQ;
1779 	error = insmntque(vp, mp);
1780 	if (error != 0) {
1781 		uma_zfree_smr(uma_inode, ip);
1782 		*vpp = NULL;
1783 		return (error);
1784 	}
1785 	vp->v_vflag &= ~VV_FORCEINSMQ;
1786 	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1787 	if (error != 0)
1788 		return (error);
1789 	if (*vpp != NULL) {
1790 		/*
1791 		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
1792 		 * operate on empty inode, which must not be found by
1793 		 * other threads until fully filled.  Vnode for empty
1794 		 * inode must be not re-inserted on the hash by other
1795 		 * thread, after removal by us at the beginning.
1796 		 */
1797 		MPASS((ffs_flags & FFSV_REPLACE) == 0);
1798 		return (0);
1799 	}
1800 	if (I_IS_UFS1(ip))
1801 		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1802 	else
1803 		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1804 
1805 	if ((ffs_flags & FFSV_NEWINODE) != 0) {
1806 		/* New inode, just zero out its contents. */
1807 		if (I_IS_UFS1(ip))
1808 			memset(ip->i_din1, 0, sizeof(struct ufs1_dinode));
1809 		else
1810 			memset(ip->i_din2, 0, sizeof(struct ufs2_dinode));
1811 	} else {
1812 		/* Read the disk contents for the inode, copy into the inode. */
1813 		dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
1814 		error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
1815 		    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
1816 		if (error != 0) {
1817 			/*
1818 			 * The inode does not contain anything useful, so it
1819 			 * would be misleading to leave it on its hash chain.
1820 			 * With mode still zero, it will be unlinked and
1821 			 * returned to the free list by vput().
1822 			 */
1823 			vgone(vp);
1824 			vput(vp);
1825 			*vpp = NULL;
1826 			return (error);
1827 		}
1828 		if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
1829 			bqrelse(bp);
1830 			vgone(vp);
1831 			vput(vp);
1832 			*vpp = NULL;
1833 			return (error);
1834 		}
1835 		bqrelse(bp);
1836 	}
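	/*
	 * With soft updates, the effective link count must reflect any
	 * outstanding dependencies, so let softdep_load_inodeblock() set
	 * i_effnlink; otherwise it is simply the on-disk link count.
	 */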
1837 	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
1838 	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
1839 		softdep_load_inodeblock(ip);
1840 	else
1841 		ip->i_effnlink = ip->i_nlink;
1842 
1843 	/*
1844 	 * Initialize the vnode from the inode, check for aliases.
1845 	 * Note that the underlying vnode may have changed.
1846 	 */
1847 	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
1848 	    &vp);
1849 	if (error) {
1850 		vgone(vp);
1851 		vput(vp);
1852 		*vpp = NULL;
1853 		return (error);
1854 	}
1855 
1856 	/*
1857 	 * Finish inode initialization.
1858 	 */
1859 	if (vp->v_type != VFIFO) {
1860 		/* FFS supports shared locking for all files except fifos. */
1861 		VN_LOCK_ASHARE(vp);
1862 	}
1863 
1864 	/*
1865 	 * Set up a generation number for this inode if it does not
1866 	 * already have one. This should only happen on old filesystems.
1867 	 */
1868 	if (ip->i_gen == 0) {
1869 		while (ip->i_gen == 0)
1870 			ip->i_gen = arc4random();
1871 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
1872 			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
1873 			DIP_SET(ip, i_gen, ip->i_gen);
1874 		}
1875 	}
1876 #ifdef MAC
1877 	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
1878 		/*
1879 		 * If this vnode is already allocated, and we're running
1880 		 * multi-label, attempt to perform a label association
1881 		 * from the extended attributes on the inode.
1882 		 */
1883 		error = mac_vnode_associate_extattr(mp, vp);
1884 		if (error) {
1885 			/* ufs_inactive will release ip->i_devvp ref. */
1886 			vgone(vp);
1887 			vput(vp);
1888 			*vpp = NULL;
1889 			return (error);
1890 		}
1891 	}
1892 #endif
1893 
1894 	vn_set_state(vp, VSTATE_CONSTRUCTED);
1895 	*vpp = vp;
1896 	return (0);
1897 }
1898 
1899 /*
1900  * File handle to vnode
1901  *
1902  * Have to be really careful about stale file handles:
1903  * - check that the inode number is valid
1904  * - for UFS2 check that the inode number is initialized
1905  * - call ffs_vget() to get the locked inode
1906  * - check for an unallocated inode (i_mode == 0)
1907  * - check that the given client host has export rights and return
1908  *   those rights via exflagsp and credanonp
1909  */
1910 static int
1911 ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
1912 {
1913 	struct ufid *ufhp;
1914 
1915 	ufhp = (struct ufid *)fhp;
1916 	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
1917 	    vpp, 0));
1918 }
1919 
1920 /*
1921  * Return a vnode from a mounted filesystem for inode with specified
1922  * generation number. Return ESTALE if the inode with given generation
1923  * number no longer exists on that filesystem.
1924  */
1925 int
1926 ffs_inotovp(struct mount *mp,
1927 	ino_t ino,
1928 	uint64_t gen,
1929 	int lflags,
1930 	struct vnode **vpp,
1931 	int ffs_flags)
1932 {
1933 	struct ufsmount *ump;
1934 	struct vnode *nvp;
1935 	struct inode *ip;
1936 	struct fs *fs;
1937 	struct cg *cgp;
1938 	struct buf *bp;
1939 	uint64_t cg;
1940 
1941 	ump = VFSTOUFS(mp);
1942 	fs = ump->um_fs;
1943 	*vpp = NULL;
1944 
1945 	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
1946 		return (ESTALE);
1947 
1948 	/*
1949 	 * Need to check if inode is initialized because UFS2 does lazy
1950 	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
1951 	 */
1952 	if (fs->fs_magic == FS_UFS2_MAGIC) {
1953 		cg = ino_to_cg(fs, ino);
1954 		if (ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp) != 0)
1955 			return (ESTALE);
1956 		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
1957 			brelse(bp);
1958 			return (ESTALE);
1959 		}
1960 		brelse(bp);
1961 	}
1962 
1963 	if (ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags) != 0)
1964 		return (ESTALE);
1965 
1966 	ip = VTOI(nvp);
1967 	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
1968 		if (ip->i_mode == 0)
1969 			vgone(nvp);
1970 		vput(nvp);
1971 		return (ESTALE);
1972 	}
1973 
1974 	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
1975 	*vpp = nvp;
1976 	return (0);
1977 }
1978 
1979 /*
1980  * Initialize the filesystem.
1981  */
1982 static int
1983 ffs_init(struct vfsconf *vfsp)
1984 {
1985 
1986 	ffs_susp_initialize();
1987 	softdep_initialize();
1988 	return (ufs_init(vfsp));
1989 }
1990 
1991 /*
1992  * Undo the work of ffs_init().
1993  */
1994 static int
1995 ffs_uninit(struct vfsconf *vfsp)
1996 {
1997 	int ret;
1998 
1999 	ret = ufs_uninit(vfsp);
2000 	softdep_uninitialize();
2001 	ffs_susp_uninitialize();
2002 	taskqueue_drain_all(taskqueue_thread);
2003 	return (ret);
2004 }
2005 
2006 /*
2007  * Structure used to pass information from ffs_sbupdate to its
2008  * helper routine ffs_use_bwrite.
2009  */
2010 struct devfd {
2011 	struct ufsmount	*ump;
2012 	struct buf	*sbbp;
2013 	int		 waitfor;
2014 	int		 suspended;
2015 	int		 error;
2016 };
2017 
2018 /*
2019  * Write a superblock and associated information back to disk.
2020  */
2021 int
2022 ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
2023 {
2024 	struct fs *fs;
2025 	struct buf *sbbp;
2026 	struct devfd devfd;
2027 
2028 	fs = ump->um_fs;
2029 	if (fs->fs_ronly == 1 &&
2030 	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
2031 	    (MNT_RDONLY | MNT_UPDATE))
2032 		panic("ffs_sbupdate: write read-only filesystem");
2033 	/*
2034 	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
2035 	 */
2036 	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
2037 	    (int)fs->fs_sbsize, 0, 0, 0);
2038 	/*
2039 	 * Initialize info needed for write function.
2040 	 */
2041 	devfd.ump = ump;
2042 	devfd.sbbp = sbbp;
2043 	devfd.waitfor = waitfor;
2044 	devfd.suspended = suspended;
2045 	devfd.error = 0;
2046 	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
2047 }
2048 
2049 /*
2050  * Write function for use by filesystem-layer routines.
2051  */
2052 static int
2053 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2054 {
2055 	struct devfd *devfdp;
2056 	struct ufsmount *ump;
2057 	struct buf *bp;
2058 	struct fs *fs;
2059 	int error;
2060 
2061 	devfdp = devfd;
2062 	ump = devfdp->ump;
2063 	fs = ump->um_fs;
2064 	/*
2065 	 * Writing the superblock summary information.
2066 	 */
2067 	if (loc != fs->fs_sblockloc) {
2068 		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2069 		bcopy(buf, bp->b_data, (uint64_t)size);
2070 		if (devfdp->suspended)
2071 			bp->b_flags |= B_VALIDSUSPWRT;
2072 		if (devfdp->waitfor != MNT_WAIT)
2073 			bawrite(bp);
2074 		else if ((error = bwrite(bp)) != 0)
2075 			devfdp->error = error;
2076 		return (0);
2077 	}
2078 	/*
2079 	 * Writing the superblock itself. We need to do special checks for it.
2080 	 */
2081 	bp = devfdp->sbbp;
2082 	if (ffs_fsfail_cleanup(ump, devfdp->error))
2083 		devfdp->error = 0;
2084 	if (devfdp->error != 0) {
2085 		brelse(bp);
2086 		return (devfdp->error);
2087 	}
2088 	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
2089 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2090 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2091 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
2092 		fs->fs_sblockloc = SBLOCK_UFS1;
2093 	}
2094 	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
2095 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2096 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2097 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
2098 		fs->fs_sblockloc = SBLOCK_UFS2;
2099 	}
2100 	if (MOUNTEDSOFTDEP(ump->um_mountp))
2101 		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
2102 	UFS_LOCK(ump);
2103 	bcopy((caddr_t)fs, bp->b_data, (uint64_t)fs->fs_sbsize);
2104 	UFS_UNLOCK(ump);
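	/*
	 * From here on, operate on the copy of the superblock in the
	 * buffer: clear fs_fmod, apply old-filesystem compatibility
	 * fixups, clear the in-memory summary pointer so a stale kernel
	 * pointer is not written to disk, and recompute the check hash.
	 */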
2105 	fs = (struct fs *)bp->b_data;
2106 	fs->fs_fmod = 0;
2107 	ffs_oldfscompat_write(fs);
2108 	fs->fs_si = NULL;
2109 	/* Recalculate the superblock hash */
2110 	fs->fs_ckhash = ffs_calc_sbhash(fs);
2111 	if (devfdp->suspended)
2112 		bp->b_flags |= B_VALIDSUSPWRT;
2113 	if (devfdp->waitfor != MNT_WAIT)
2114 		bawrite(bp);
2115 	else if ((error = bwrite(bp)) != 0)
2116 		devfdp->error = error;
2117 	return (devfdp->error);
2118 }
2119 
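/*
 * Extended attribute control.  Dispatch to the UFS extended attribute
 * implementation when the kernel is built with UFS_EXTATTR, otherwise
 * fall back to the VFS default.
 */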
2120 static int
2121 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
2122 	int attrnamespace, const char *attrname)
2123 {
2124 
2125 #ifdef UFS_EXTATTR
2126 	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
2127 	    attrname));
2128 #else
2129 	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
2130 	    attrname));
2131 #endif
2132 }
2133 
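/*
 * Release an in-core inode: return its dinode to the appropriate UMA
 * zone and free the inode itself via the SMR-enabled zone used for
 * lockless vnode lookup.
 */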
2134 static void
2135 ffs_ifree(struct ufsmount *ump, struct inode *ip)
2136 {
2137 
2138 	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
2139 		uma_zfree(uma_ufs1, ip->i_din1);
2140 	else if (ip->i_din2 != NULL)
2141 		uma_zfree(uma_ufs2, ip->i_din2);
2142 	uma_zfree_smr(uma_inode, ip);
2143 }
2144 
2145 static int dobkgrdwrite = 1;
2146 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
2147     "Do background writes (honoring the BV_BKGRDWRITE flag)?");
2148 
2149 /*
2150  * Complete a background write started from bwrite.
2151  */
2152 static void
2153 ffs_backgroundwritedone(struct buf *bp)
2154 {
2155 	struct bufobj *bufobj;
2156 	struct buf *origbp;
2157 
2158 #ifdef SOFTUPDATES
2159 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
2160 		softdep_handle_error(bp);
2161 #endif
2162 
2163 	/*
2164 	 * Find the original buffer that we are writing.
2165 	 */
2166 	bufobj = bp->b_bufobj;
2167 	BO_LOCK(bufobj);
2168 	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2169 		panic("backgroundwritedone: lost buffer");
2170 
2171 	/*
2172 	 * If the write failed, mark the cylinder group buffer origbp
2173 	 * as dirty so that the failed write is not lost.
2174 	 */
2175 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2176 		origbp->b_vflags |= BV_BKGRDERR;
2177 	BO_UNLOCK(bufobj);
2178 	/*
2179 	 * Process dependencies then return any unfinished ones.
2180 	 */
2181 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
2182 		buf_complete(bp);
2183 #ifdef SOFTUPDATES
2184 	if (!LIST_EMPTY(&bp->b_dep))
2185 		softdep_move_dependencies(bp, origbp);
2186 #endif
2187 	/*
2188 	 * This buffer is marked B_NOCACHE so when it is released
2189 	 * by biodone it will be tossed.  Clear B_IOSTARTED in case of error.
2190 	 */
2191 	bp->b_flags |= B_NOCACHE;
2192 	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
2193 	pbrelvp(bp);
2194 
2195 	/*
2196 	 * Prevent brelse() from trying to keep and re-dirtying bp on
2197 	 * errors. It causes b_bufobj dereference in
2198 	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
2199 	 * pbrelvp() above.
2200 	 */
2201 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2202 		bp->b_flags |= B_INVAL;
2203 	bufdone(bp);
2204 	BO_LOCK(bufobj);
2205 	/*
2206 	 * Clear the BV_BKGRDINPROG flag in the original buffer
2207 	 * and awaken it if it is waiting for the write to complete.
2208 	 * If BV_BKGRDINPROG is not set in the original buffer it must
2209 	 * have been released and re-instantiated - which is not legal.
2210 	 */
2211 	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2212 	    ("backgroundwritedone: lost buffer2"));
2213 	origbp->b_vflags &= ~BV_BKGRDINPROG;
2214 	if (origbp->b_vflags & BV_BKGRDWAIT) {
2215 		origbp->b_vflags &= ~BV_BKGRDWAIT;
2216 		wakeup(&origbp->b_xflags);
2217 	}
2218 	BO_UNLOCK(bufobj);
2219 }
2220 
2221 /*
2222  * Write, release buffer on completion.  (Done by iodone
2223  * if async).  Do not bother writing anything if the buffer
2224  * is invalid.
2225  *
2226  * Note that we set B_CACHE here, indicating that the buffer is
2227  * fully valid and thus cacheable.  This is true even for NFS
2228  * now, so we set it generally.  This could be set either here
2229  * or in biodone() since the I/O is synchronous.  We put it
2230  * here.
2231  */
2232 static int
2233 ffs_bufwrite(struct buf *bp)
2234 {
2235 	struct buf *newbp;
2236 	struct cg *cgp;
2237 
2238 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2239 	if (bp->b_flags & B_INVAL) {
2240 		brelse(bp);
2241 		return (0);
2242 	}
2243 
2244 	if (!BUF_ISLOCKED(bp))
2245 		panic("bufwrite: buffer is not busy???");
2246 	/*
2247 	 * If a background write is already in progress, delay
2248 	 * writing this block if it is asynchronous. Otherwise
2249 	 * wait for the background write to complete.
2250 	 */
2251 	BO_LOCK(bp->b_bufobj);
2252 	if (bp->b_vflags & BV_BKGRDINPROG) {
2253 		if (bp->b_flags & B_ASYNC) {
2254 			BO_UNLOCK(bp->b_bufobj);
2255 			bdwrite(bp);
2256 			return (0);
2257 		}
2258 		bp->b_vflags |= BV_BKGRDWAIT;
2259 		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2260 		    "bwrbg", 0);
2261 		if (bp->b_vflags & BV_BKGRDINPROG)
2262 			panic("bufwrite: still writing");
2263 	}
2264 	bp->b_vflags &= ~BV_BKGRDERR;
2265 	BO_UNLOCK(bp->b_bufobj);
2266 
2267 	/*
2268 	 * If this buffer is marked for background writing and we
2269 	 * do not have to wait for it, make a copy and write the
2270 	 * copy so as to leave this buffer ready for further use.
2271 	 *
2272 	 * This optimization eats a lot of memory.  If we have a page
2273 	 * or buffer shortfall we can't do it.
2274 	 */
2275 	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2276 	    (bp->b_flags & B_ASYNC) &&
2277 	    !vm_page_count_severe() &&
2278 	    !buf_dirty_count_severe()) {
2279 		KASSERT(bp->b_iodone == NULL,
2280 		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2281 
2282 		/* get a new block */
2283 		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2284 		if (newbp == NULL)
2285 			goto normal_write;
2286 
2287 		KASSERT(buf_mapped(bp), ("Unmapped cg"));
2288 		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2289 		BO_LOCK(bp->b_bufobj);
2290 		bp->b_vflags |= BV_BKGRDINPROG;
2291 		BO_UNLOCK(bp->b_bufobj);
2292 		newbp->b_xflags |=
2293 		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
2294 		newbp->b_lblkno = bp->b_lblkno;
2295 		newbp->b_blkno = bp->b_blkno;
2296 		newbp->b_offset = bp->b_offset;
2297 		newbp->b_iodone = ffs_backgroundwritedone;
2298 		newbp->b_flags |= B_ASYNC;
2299 		newbp->b_flags &= ~B_INVAL;
2300 		pbgetvp(bp->b_vp, newbp);
2301 
2302 #ifdef SOFTUPDATES
2303 		/*
2304 		 * Move over the dependencies.  If there are rollbacks,
2305 		 * leave the parent buffer dirtied as it will need to
2306 		 * be written again.
2307 		 */
2308 		if (LIST_EMPTY(&bp->b_dep) ||
2309 		    softdep_move_dependencies(bp, newbp) == 0)
2310 			bundirty(bp);
2311 #else
2312 		bundirty(bp);
2313 #endif
2314 
2315 		/*
2316 		 * Initiate write on the copy, release the original.  The
2317 		 * BKGRDINPROG flag prevents it from going away until
2318 		 * the background write completes. We have to recalculate
2319 		 * its check hash in case the buffer gets freed and then
2320 		 * reconstituted from the buffer cache during a later read.
2321 		 */
2322 		if ((bp->b_xflags & BX_CYLGRP) != 0) {
2323 			cgp = (struct cg *)bp->b_data;
2324 			cgp->cg_ckhash = 0;
2325 			cgp->cg_ckhash =
2326 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2327 		}
2328 		bqrelse(bp);
2329 		bp = newbp;
2330 	} else
2331 		/* Mark the buffer clean */
2332 		bundirty(bp);
2333 
2334 	/* Let the normal bufwrite do the rest for us */
2335 normal_write:
2336 	/*
2337 	 * If we are writing a cylinder group, update its time.
2338 	 */
2339 	if ((bp->b_xflags & BX_CYLGRP) != 0) {
2340 		cgp = (struct cg *)bp->b_data;
2341 		cgp->cg_old_time = cgp->cg_time = time_second;
2342 	}
2343 	return (bufwrite(bp));
2344 }
2345 
2346 static void
2347 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2348 {
2349 	struct vnode *vp;
2350 	struct buf *tbp;
2351 	int error, nocopy;
2352 
2353 	/*
2354 	 * This is the bufobj strategy for the private VCHR vnodes
2355 	 * used by FFS to access the underlying storage device.
2356 	 * We override the default bufobj strategy and thus bypass
2357 	 * VOP_STRATEGY() for these vnodes.
2358 	 */
2359 	vp = bo2vnode(bo);
2360 	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
2361 	    bp->b_vp->v_rdev == NULL ||
2362 	    bp->b_vp->v_rdev->si_mountpt == NULL ||
2363 	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
2364 	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
2365 	    ("ffs_geom_strategy() with wrong vp"));
2366 	if (bp->b_iocmd == BIO_WRITE) {
2367 		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2368 		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2369 		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2370 			panic("ffs_geom_strategy: bad I/O");
2371 		nocopy = bp->b_flags & B_NOCOPY;
2372 		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2373 		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2374 		    vp->v_rdev->si_snapdata != NULL) {
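			/*
			 * Snapshots are present: give each constituent
			 * buffer of a cluster (or the buffer itself) a
			 * chance to be copied on write before the write
			 * is issued; a hard error aborts the I/O here.
			 */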
2375 			if ((bp->b_flags & B_CLUSTER) != 0) {
2376 				runningbufwakeup(bp);
2377 				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2378 					      b_cluster.cluster_entry) {
2379 					error = ffs_copyonwrite(vp, tbp);
2380 					if (error != 0 &&
2381 					    error != EOPNOTSUPP) {
2382 						bp->b_error = error;
2383 						bp->b_ioflags |= BIO_ERROR;
2384 						bp->b_flags &= ~B_BARRIER;
2385 						bufdone(bp);
2386 						return;
2387 					}
2388 				}
2389 				(void)runningbufclaim(bp, bp->b_bufsize);
2390 			} else {
2391 				error = ffs_copyonwrite(vp, bp);
2392 				if (error != 0 && error != EOPNOTSUPP) {
2393 					bp->b_error = error;
2394 					bp->b_ioflags |= BIO_ERROR;
2395 					bp->b_flags &= ~B_BARRIER;
2396 					bufdone(bp);
2397 					return;
2398 				}
2399 			}
2400 		}
2401 #ifdef SOFTUPDATES
2402 		if ((bp->b_flags & B_CLUSTER) != 0) {
2403 			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2404 				      b_cluster.cluster_entry) {
2405 				if (!LIST_EMPTY(&tbp->b_dep))
2406 					buf_start(tbp);
2407 			}
2408 		} else {
2409 			if (!LIST_EMPTY(&bp->b_dep))
2410 				buf_start(bp);
2411 		}
2412 
2413 #endif
2414 		/*
2415 		 * Check for metadata that needs check-hashes and update them.
2416 		 */
2417 		switch (bp->b_xflags & BX_FSPRIV) {
2418 		case BX_CYLGRP:
2419 			((struct cg *)bp->b_data)->cg_ckhash = 0;
2420 			((struct cg *)bp->b_data)->cg_ckhash =
2421 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2422 			break;
2423 
2424 		case BX_SUPERBLOCK:
2425 		case BX_INODE:
2426 		case BX_INDIR:
2427 		case BX_DIR:
2428 			printf("Check-hash write is unimplemented!!!\n");
2429 			break;
2430 
2431 		case 0:
2432 			break;
2433 
2434 		default:
2435 			printf("multiple buffer types 0x%b\n",
2436 			    (bp->b_xflags & BX_FSPRIV), PRINT_UFS_BUF_XFLAGS);
2437 			break;
2438 		}
2439 	}
2440 	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
2441 		bp->b_xflags |= BX_CVTENXIO;
2442 	g_vfs_strategy(bo, bp);
2443 }
2444 
2445 int
2446 ffs_own_mount(const struct mount *mp)
2447 {
2448 
2449 	if (mp->mnt_op == &ufs_vfsops)
2450 		return (1);
2451 	return (0);
2452 }
2453 
2454 #ifdef	DDB
2455 #ifdef SOFTUPDATES
2456 
2457 /* defined in ffs_softdep.c */
2458 extern void db_print_ffs(struct ufsmount *ump);
2459 
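/*
 * DDB "show ffs [addr]" command: print db_print_ffs() information for
 * the given ufsmount, or for every mounted UFS filesystem when no
 * address is supplied.
 */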
2460 DB_SHOW_COMMAND(ffs, db_show_ffs)
2461 {
2462 	struct mount *mp;
2463 	struct ufsmount *ump;
2464 
2465 	if (have_addr) {
2466 		ump = VFSTOUFS((struct mount *)addr);
2467 		db_print_ffs(ump);
2468 		return;
2469 	}
2470 
2471 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2472 		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2473 			db_print_ffs(VFSTOUFS(mp));
2474 	}
2475 }
2476 
2477 #endif	/* SOFTUPDATES */
2478 #endif	/* DDB */
2479