/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				  int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct buf *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t	ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
static void	ffs_ckhash_cg(struct buf *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
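/*
 * For example (hypothetical geometry): with fs_fsize of 2048 and fs_bsize
 * of 16384, the legal values for "size" are 2048, 4096, 6144, ..., 16384,
 * i.e. one to fs_frag (here 8) fragments.
 */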
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a larger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error, gbflags;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	vp = ITOV(ip);
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	bp = NULL;
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= UFS_NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	*bpp = NULL;
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
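	/*
	 * To make the hysteresis above concrete: with the (typical) default
	 * fs_minfree of 8%, SPACE switches to TIME once fragmented free
	 * space exceeds 4% of fs_dsize (minfree / 2), and TIME switches
	 * back to SPACE once it drops below 6% (minfree - 2).
	 */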
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * no space available
	 */
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * from the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success in doing the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather, the previous block
 * allocation will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
"do not force synchronous writes when blocks are reallocated");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
"enable block reallocation");

static int maxclustersearch = 10;
SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
0, "max number of cylinder groups to search for contiguous blocks");
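
/*
 * These knobs appear under the vfs.ffs sysctl node; for example, block
 * reallocation can be disabled at runtime with
 * "sysctl vfs.ffs.doreallocblks=0".
 */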

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct ufsmount *ump;

	/*
	 * If the underlying device can do deletes, then skip reallocating
	 * the blocks of this file into contiguous sequences. Devices that
	 * benefit from BIO_DELETE also benefit from not moving the data.
	 * These devices are flash and therefore work less well with this
	 * optimization. Also skip if reallocblks has been disabled globally.
	 */
	ump = ap->a_vp->v_mount->mnt_data;
	if (ump->um_candelete || doreallocblks == 0)
		return (ENOSPC);

	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here.  Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than a 5% free block reserve is not recommended and those
	 * who choose to do so should not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
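	/*
	 * For example (hypothetical numbers), a 6-block cluster whose last
	 * two blocks are mapped by the next indirect block spans two maps:
	 * sbap covers the first 4 pointers (ssize = 4) and ebap the rest.
	 */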
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ump->um_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than a 5% free block reserve is not recommended and those
	 * who choose to do so should not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, do not move anything in it. Indirect blocks are
	 * usually initially laid out in a position between the data
	 * blocks. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ump->um_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	ump = ITOUMP(pip);
	fs = ump->um_fs;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of dirs created one after another
	 * in the same cg without intervening allocations of files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
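	/*
	 * The fs_contigdirs[] counts maintained above are consumed by
	 * ffs_dirpref() below, which declines to place yet another
	 * directory in a cylinder group once the count reaches its
	 * computed maxcontigdirs limit.
	 */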
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
					(allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %ju, fs = %s\n",
		    ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
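	/*
	 * The new generation is the old one incremented, falling back to
	 * arc4random() for a fresh inode (gen 0) or on wraparound. Keeping
	 * it nonzero lets consumers such as NFS file handles distinguish
	 * a reused inode number from the original file.
	 */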
	while (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random();
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ufs_prepare_reclaim(*vpp);
	ip->i_flag = 0;
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		(*vpp)->v_op = &ffs_vnodeops2;
		ip->i_flag |= IN_UFS2;
	} else {
		(*vpp)->v_op = &ffs_vnodeops1;
	}
	return (0);
noinodes:
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
	fs = ITOFS(pip);

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Compute various limits that are used for the
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
				    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;
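	/*
	 * Worked example with hypothetical numbers: if each cylinder group
	 * averages 8192 free blocks of 32K (256MB free) and an expected
	 * directory consumes dirsize = fs_avgfilesize * fs_avgfpdir = 1MB,
	 * then maxcontigdirs = min(256, 255) = 255 directories may be
	 * placed in a cg (possibly capped further by fs_ipg / fs_avgfpdir)
	 * before the scan below moves on to the next group.
	 */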

	/*
	 * Limit the number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 *
	 * We are trying to find a suitable cylinder group nearby
	 * our preferred cylinder group to place a new directory.
	 * We scan from our preferred cylinder group forward looking
	 * for a cylinder group that meets our criterion. If we get
	 * to the final cylinder group and do not find anything,
	 * we start scanning forwards from the beginning of the
	 * filesystem. While it might seem sensible to start scanning
	 * backwards or even to alternate looking forward and backward,
	 * this approach fails badly when the filesystem is nearly full.
	 * Specifically, we first search all the areas that have no space
	 * and finally try the one preceding that. We repeat this on
	 * every request and in the case of the final block end up
	 * searching the entire filesystem. By jumping to the front
	 * of the filesystem, our future forward searches always look
	 * in new cylinder groups, so we find every possible block after
	 * one pass over the filesystem.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks and the next fs_maxbpg blocks. Each additional section
 * contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. The first indirect is allocated immediately following the last
 * direct block and the data blocks for the first indirect immediately
 * follow it.
 *
 * If no blocks have been allocated in any other section, the indirect
 * block(s) are allocated in the same cylinder group as its inode in an
 * area reserved immediately following the inode blocks. The policy for
 * the data blocks is to place them in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block or the previous block is a hole, then the information on
 * the previous allocation is unavailable; here a best guess is made based
 * on the logical block number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * allocate blocks contiguously within the section if possible.
 */
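/*
 * Example of the common contiguous case (hypothetical numbers): with
 * fs_frag = 8, if the previous block of the section lives at fragment
 * number 4096, the returned preference for the next block is 4104, the
 * fragment immediately following it (bap[indx - 1] + fs_frag).
 */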
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
	fs = ITOFS(ip);
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
		    ip->i_din1->di_db[UFS_NDADDR - 1] != 0)
			pref = ip->i_din1->di_db[UFS_NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == UFS_NDADDR) {
		pref = ip->i_din1->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < UFS_NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Same as above, but for UFS2.
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
	fs = ITOFS(ip);
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[UFS_NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[UFS_NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == UFS_NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < UFS_NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held.  Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ITOFS(ip);
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
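	/*
	 * The step doubles each iteration, so the groups probed sit at
	 * cumulative offsets 1, 3, 7, 15, ... from the starting group,
	 * i.e. icg + 2^k - 1 (mod fs_ncg).
	 */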
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
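/*
 * For example (hypothetical geometry, fs_frag = 8): extending a 2-fragment
 * piece to 4 fragments succeeds only if the 2 fragments immediately
 * following it within the same block are still free; an extension that
 * would run past the end of the block is refused outright.
 */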
static ufs2_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bprev;
	int osize, nsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int nffree;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	ump = ITOUMP(ip);
	fs = ump->um_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	UFS_UNLOCK(ump);
	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0)
		goto fail;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i))
			goto fail;
1616 	/*
1617 	 * The current fragment can be extended: deduct the count on
1618 	 * the fragment being extended into, increase the count on the
1619 	 * remaining fragment (if any), and then allocate the extended
1620 	 * piece.
1621 	 */
1622 	for (i = frags; i < fs->fs_frag - bbase; i++)
1623 		if (isclr(blksfree, bno + i))
1624 			break;
1625 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
1626 	if (i != frags)
1627 		cgp->cg_frsum[i - frags]++;
1628 	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1629 		clrbit(blksfree, bno + i);
1630 		cgp->cg_cs.cs_nffree--;
1631 		nffree++;
1632 	}
1633 	UFS_LOCK(ump);
1634 	fs->fs_cstotal.cs_nffree -= nffree;
1635 	fs->fs_cs(fs, cg).cs_nffree -= nffree;
1636 	fs->fs_fmod = 1;
1637 	ACTIVECLEAR(fs, cg);
1638 	UFS_UNLOCK(ump);
1639 	if (DOINGSOFTDEP(ITOV(ip)))
1640 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1641 		    frags, numfrags(fs, osize));
1642 	bdwrite(bp);
1643 	return (bprev);
1644 
1645 fail:
1646 	brelse(bp);
1647 	UFS_LOCK(ump);
1648 	return (0);
1649 
1650 }
1651 
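/*
 * An illustrative userspace sketch of the boundary test above, kept
 * out of any build with #if 0.  FS_FRAG and can_extend are assumed
 * names; with a hypothetical 8 frags per block, a fragment run may
 * only grow while it stays inside the block that holds its first
 * fragment, which is exactly the fragnum() comparison above.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define FS_FRAG	8		/* frags per block (assumed) */

static unsigned
fragnum(unsigned long loc)
{
	return (loc % FS_FRAG);	/* fragment offset within its block */
}

/* Can a run starting at bprev be grown to nfrags fragments in place? */
static bool
can_extend(unsigned long bprev, unsigned nfrags)
{
	return (fragnum(bprev) <= fragnum(bprev + nfrags - 1));
}

int
main(void)
{
	printf("%d\n", can_extend(21, 3));	/* frags 5..7: fits, 1 */
	printf("%d\n", can_extend(21, 4));	/* frags 5..8: crosses, 0 */
	return (0);
}
#endif
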
1652 /*
1653  * Determine whether a block can be allocated.
1654  *
1655  * Check to see if a block of the appropriate size is available,
1656  * and if it is, allocate it.
1657  */
1658 static ufs2_daddr_t
1659 ffs_alloccg(ip, cg, bpref, size, rsize)
1660 	struct inode *ip;
1661 	u_int cg;
1662 	ufs2_daddr_t bpref;
1663 	int size;
1664 	int rsize;
1665 {
1666 	struct fs *fs;
1667 	struct cg *cgp;
1668 	struct buf *bp;
1669 	struct ufsmount *ump;
1670 	ufs1_daddr_t bno;
1671 	ufs2_daddr_t blkno;
1672 	int i, allocsiz, error, frags;
1673 	u_int8_t *blksfree;
1674 
1675 	ump = ITOUMP(ip);
1676 	fs = ump->um_fs;
1677 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1678 		return (0);
1679 	UFS_UNLOCK(ump);
1680 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0 ||
1681 	   (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1682 		goto fail;
1683 	if (size == fs->fs_bsize) {
1684 		UFS_LOCK(ump);
1685 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1686 		ACTIVECLEAR(fs, cg);
1687 		UFS_UNLOCK(ump);
1688 		bdwrite(bp);
1689 		return (blkno);
1690 	}
1691 	/*
1692 	 * Check to see if any fragments are already available.
1693 	 * allocsiz is the size that will be allocated, hacked
1694 	 * down to a smaller size if necessary.
1695 	 */
1696 	blksfree = cg_blksfree(cgp);
1697 	frags = numfrags(fs, size);
1698 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1699 		if (cgp->cg_frsum[allocsiz] != 0)
1700 			break;
1701 	if (allocsiz == fs->fs_frag) {
1702 		/*
1703 		 * No fragments were available, so a whole block will be
1704 		 * allocated and then hacked up.
1705 		 */
1706 		if (cgp->cg_cs.cs_nbfree == 0)
1707 			goto fail;
1708 		UFS_LOCK(ump);
1709 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1710 		ACTIVECLEAR(fs, cg);
1711 		UFS_UNLOCK(ump);
1712 		bdwrite(bp);
1713 		return (blkno);
1714 	}
1715 	KASSERT(size == rsize,
1716 	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1717 	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1718 	if (bno < 0)
1719 		goto fail;
1720 	for (i = 0; i < frags; i++)
1721 		clrbit(blksfree, bno + i);
1722 	cgp->cg_cs.cs_nffree -= frags;
1723 	cgp->cg_frsum[allocsiz]--;
1724 	if (frags != allocsiz)
1725 		cgp->cg_frsum[allocsiz - frags]++;
1726 	UFS_LOCK(ump);
1727 	fs->fs_cstotal.cs_nffree -= frags;
1728 	fs->fs_cs(fs, cg).cs_nffree -= frags;
1729 	fs->fs_fmod = 1;
1730 	blkno = cgbase(fs, cg) + bno;
1731 	ACTIVECLEAR(fs, cg);
1732 	UFS_UNLOCK(ump);
1733 	if (DOINGSOFTDEP(ITOV(ip)))
1734 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1735 	bdwrite(bp);
1736 	return (blkno);
1737 
1738 fail:
1739 	brelse(bp);
1740 	UFS_LOCK(ump);
1741 	return (0);
1742 }
1743 
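/*
 * An illustrative userspace sketch of the fragment-descriptor
 * bookkeeping above, kept out of any build with #if 0; take_frags is
 * a hypothetical name.  cg_frsum[n] counts the free runs of exactly n
 * fragments in the group; carving `frags` frags out of the smallest
 * adequate run consumes one run of size allocsiz and, when allocsiz
 * exceeds frags, leaves one run of size allocsiz - frags behind.
 */
#if 0
#include <stdio.h>

#define FS_FRAG	8	/* frags per block (assumed) */

/* Returns chosen run size, or FS_FRAG if a full block must be split. */
static int
take_frags(int frsum[FS_FRAG], int frags)
{
	int allocsiz;

	for (allocsiz = frags; allocsiz < FS_FRAG; allocsiz++)
		if (frsum[allocsiz] != 0)
			break;
	if (allocsiz == FS_FRAG)
		return (FS_FRAG);	/* caller splits a whole block */
	frsum[allocsiz]--;		/* one run consumed ... */
	if (allocsiz != frags)
		frsum[allocsiz - frags]++;	/* ... leftover run appears */
	return (allocsiz);
}

int
main(void)
{
	int frsum[FS_FRAG] = { 0, 2, 0, 0, 0, 1, 0, 0 };

	/* Request 3 frags: the 5-frag run is used, leaving a 2-frag run. */
	printf("used run of %d\n", take_frags(frsum, 3));
	printf("frsum[5]=%d frsum[2]=%d\n", frsum[5], frsum[2]);  /* 0, 1 */
	return (0);
}
#endif
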
1744 /*
1745  * Allocate a block in a cylinder group.
1746  *
1747  * This algorithm implements the following policy:
1748  *   1) allocate the requested block.
1749  *   2) allocate a rotationally optimal block in the same cylinder.
1750  *   3) allocate the next available block on the block rotor for the
1751  *      specified cylinder group.
1752  * Note that this routine only allocates fs_bsize blocks; these
1753  * blocks may be fragmented by the routine that allocates them.
1754  */
1755 static ufs2_daddr_t
1756 ffs_alloccgblk(ip, bp, bpref, size)
1757 	struct inode *ip;
1758 	struct buf *bp;
1759 	ufs2_daddr_t bpref;
1760 	int size;
1761 {
1762 	struct fs *fs;
1763 	struct cg *cgp;
1764 	struct ufsmount *ump;
1765 	ufs1_daddr_t bno;
1766 	ufs2_daddr_t blkno;
1767 	u_int8_t *blksfree;
1768 	int i, cgbpref;
1769 
1770 	ump = ITOUMP(ip);
1771 	fs = ump->um_fs;
1772 	mtx_assert(UFS_MTX(ump), MA_OWNED);
1773 	cgp = (struct cg *)bp->b_data;
1774 	blksfree = cg_blksfree(cgp);
1775 	if (bpref == 0) {
1776 		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1777 	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1778 		/* map bpref to correct zone in this cg */
1779 		if (bpref < cgdata(fs, cgbpref))
1780 			bpref = cgmeta(fs, cgp->cg_cgx);
1781 		else
1782 			bpref = cgdata(fs, cgp->cg_cgx);
1783 	}
1784 	/*
1785 	 * if the requested block is available, use it
1786 	 */
1787 	bno = dtogd(fs, blknum(fs, bpref));
1788 	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1789 		goto gotit;
1790 	/*
1791 	 * Take the next available block in this cylinder group.
1792 	 */
1793 	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1794 	if (bno < 0)
1795 		return (0);
1796 	/* Update cg_rotor only if allocated from the data zone */
1797 	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1798 		cgp->cg_rotor = bno;
1799 gotit:
1800 	blkno = fragstoblks(fs, bno);
1801 	ffs_clrblock(fs, blksfree, (long)blkno);
1802 	ffs_clusteracct(fs, cgp, blkno, -1);
1803 	cgp->cg_cs.cs_nbfree--;
1804 	fs->fs_cstotal.cs_nbfree--;
1805 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1806 	fs->fs_fmod = 1;
1807 	blkno = cgbase(fs, cgp->cg_cgx) + bno;
1808 	/*
1809 	 * If the caller didn't want the whole block, free the frags here.
1810 	 */
1811 	size = numfrags(fs, size);
1812 	if (size != fs->fs_frag) {
1813 		bno = dtogd(fs, blkno);
1814 		for (i = size; i < fs->fs_frag; i++)
1815 			setbit(blksfree, bno + i);
1816 		i = fs->fs_frag - size;
1817 		cgp->cg_cs.cs_nffree += i;
1818 		fs->fs_cstotal.cs_nffree += i;
1819 		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1820 		fs->fs_fmod = 1;
1821 		cgp->cg_frsum[i]++;
1822 	}
1823 	/* XXX Fixme. */
1824 	UFS_UNLOCK(ump);
1825 	if (DOINGSOFTDEP(ITOV(ip)))
1826 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
1827 		    size, 0);
1828 	UFS_LOCK(ump);
1829 	return (blkno);
1830 }
1831 
1832 /*
1833  * Determine whether a cluster can be allocated.
1834  *
1835  * We do not currently check for optimal rotational layout if there
1836  * are multiple choices in the same cylinder group. Instead we just
1837  * take the first one that we find following bpref.
1838  */
1839 static ufs2_daddr_t
1840 ffs_clusteralloc(ip, cg, bpref, len)
1841 	struct inode *ip;
1842 	u_int cg;
1843 	ufs2_daddr_t bpref;
1844 	int len;
1845 {
1846 	struct fs *fs;
1847 	struct cg *cgp;
1848 	struct buf *bp;
1849 	struct ufsmount *ump;
1850 	int i, run, bit, map, got, error;
1851 	ufs2_daddr_t bno;
1852 	u_char *mapp;
1853 	int32_t *lp;
1854 	u_int8_t *blksfree;
1855 
1856 	ump = ITOUMP(ip);
1857 	fs = ump->um_fs;
1858 	if (fs->fs_maxcluster[cg] < len)
1859 		return (0);
1860 	UFS_UNLOCK(ump);
1861 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
1862 		UFS_LOCK(ump);
1863 		return (0);
1864 	}
1865 	/*
1866 	 * Check to see if a cluster of the needed size (or bigger) is
1867 	 * available in this cylinder group.
1868 	 */
1869 	lp = &cg_clustersum(cgp)[len];
1870 	for (i = len; i <= fs->fs_contigsumsize; i++)
1871 		if (*lp++ > 0)
1872 			break;
1873 	if (i > fs->fs_contigsumsize) {
1874 		/*
1875 		 * This is the first time we have looked for a cluster
1876 		 * in this cylinder group. Update the cluster summary
1877 		 * information to reflect the true maximum-sized cluster
1878 		 * so that future cluster allocation requests can avoid reading
1879 		 * the cylinder group map only to find no clusters.
1880 		 */
1881 		lp = &cg_clustersum(cgp)[len - 1];
1882 		for (i = len - 1; i > 0; i--)
1883 			if (*lp-- > 0)
1884 				break;
1885 		UFS_LOCK(ump);
1886 		fs->fs_maxcluster[cg] = i;
1887 		brelse(bp);
1888 		return (0);
1889 	}
1890 	/*
1891 	 * Search the cluster map to find a big enough cluster.
1892 	 * We take the first one that we find, even if it is larger
1893 	 * than we need, as we prefer to get one close to the previous
1894 	 * block allocation. We do not search before the current
1895 	 * preference point, as we do not want to allocate a block
1896 	 * that lies before the previous one (we would then have to
1897 	 * wait for another pass of the elevator algorithm before it
1898 	 * could be read). We prefer to fail and be recalled to try
1899 	 * an allocation in the next cylinder group.
1900 	 */
1901 	if (dtog(fs, bpref) != cg)
1902 		bpref = cgdata(fs, cg);
1903 	else
1904 		bpref = blknum(fs, bpref);
1905 	bpref = fragstoblks(fs, dtogd(fs, bpref));
1906 	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1907 	map = *mapp++;
1908 	bit = 1 << (bpref % NBBY);
1909 	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1910 		if ((map & bit) == 0) {
1911 			run = 0;
1912 		} else {
1913 			run++;
1914 			if (run == len)
1915 				break;
1916 		}
1917 		if ((got & (NBBY - 1)) != (NBBY - 1)) {
1918 			bit <<= 1;
1919 		} else {
1920 			map = *mapp++;
1921 			bit = 1;
1922 		}
1923 	}
1924 	if (got >= cgp->cg_nclusterblks) {
1925 		UFS_LOCK(ump);
1926 		brelse(bp);
1927 		return (0);
1928 	}
1929 	/*
1930 	 * Allocate the cluster that we have found.
1931 	 */
1932 	blksfree = cg_blksfree(cgp);
1933 	for (i = 1; i <= len; i++)
1934 		if (!ffs_isblock(fs, blksfree, got - run + i))
1935 			panic("ffs_clusteralloc: map mismatch");
1936 	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1937 	if (dtog(fs, bno) != cg)
1938 		panic("ffs_clusteralloc: allocated out of group");
1939 	len = blkstofrags(fs, len);
1940 	UFS_LOCK(ump);
1941 	for (i = 0; i < len; i += fs->fs_frag)
1942 		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1943 			panic("ffs_clusteralloc: lost block");
1944 	ACTIVECLEAR(fs, cg);
1945 	UFS_UNLOCK(ump);
1946 	bdwrite(bp);
1947 	return (bno);
1948 }
1949 
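/*
 * An illustrative userspace sketch of the run scan above, kept out of
 * any build with #if 0; find_run is a hypothetical name.  The free
 * map is a plain bitmap (bit set = block free); starting at block
 * bpref we walk the bits, resetting the run counter on every clear
 * bit, until len consecutive set bits are seen or the map runs out.
 */
#if 0
#include <stdio.h>

#define NBBY	8	/* bits per byte */

/* Returns first block of the run, or -1 if none found. */
static int
find_run(const unsigned char *map, int nblks, int bpref, int len)
{
	const unsigned char *mapp;
	unsigned char m;
	int got, run, bit;

	mapp = &map[bpref / NBBY];
	m = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < nblks; got++) {
		if ((m & bit) == 0)
			run = 0;		/* clear bit breaks the run */
		else if (++run == len)
			return (got - run + 1);
		if ((got & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;		/* next bit, same byte */
		else {
			m = *mapp++;		/* next byte */
			bit = 1;
		}
	}
	return (-1);
}

int
main(void)
{
	/* Free blocks 5, 6 and 8..11; trailing pad byte, as the kernel
	   rounds its maps up. */
	unsigned char map[] = { 0x60, 0x0f, 0x00 };

	printf("%d\n", find_run(map, 16, 0, 3));	/* prints 8 */
	return (0);
}
#endif
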
1950 static inline struct buf *
1951 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1952 {
1953 	struct fs *fs;
1954 
1955 	fs = ITOFS(ip);
1956 	return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
1957 	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
1958 	    gbflags));
1959 }
1960 
1961 /*
1962  * Synchronous inode initialization is needed only when barrier writes do not
1963  * work as advertised; it imposes a heavy cost on file creation in a newly
1964  * created filesystem.
1965  */
1966 static int doasyncinodeinit = 1;
1967 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncinodeinit, CTLFLAG_RWTUN,
1968     &doasyncinodeinit, 0,
1969     "Perform inode block initialization using asynchronous writes");
1970 
1971 /*
1972  * Determine whether an inode can be allocated.
1973  *
1974  * Check to see if an inode is available, and if it is,
1975  * allocate it using the following policy:
1976  *   1) allocate the requested inode.
1977  *   2) allocate the next available inode after the requested
1978  *      inode in the specified cylinder group.
1979  */
1980 static ufs2_daddr_t
1981 ffs_nodealloccg(ip, cg, ipref, mode, unused)
1982 	struct inode *ip;
1983 	u_int cg;
1984 	ufs2_daddr_t ipref;
1985 	int mode;
1986 	int unused;
1987 {
1988 	struct fs *fs;
1989 	struct cg *cgp;
1990 	struct buf *bp, *ibp;
1991 	struct ufsmount *ump;
1992 	u_int8_t *inosused, *loc;
1993 	struct ufs2_dinode *dp2;
1994 	int error, start, len, i;
1995 	u_int32_t old_initediblk;
1996 
1997 	ump = ITOUMP(ip);
1998 	fs = ump->um_fs;
1999 check_nifree:
2000 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
2001 		return (0);
2002 	UFS_UNLOCK(ump);
2003 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
2004 		UFS_LOCK(ump);
2005 		return (0);
2006 	}
2007 restart:
2008 	if (cgp->cg_cs.cs_nifree == 0) {
2009 		brelse(bp);
2010 		UFS_LOCK(ump);
2011 		return (0);
2012 	}
2013 	inosused = cg_inosused(cgp);
2014 	if (ipref) {
2015 		ipref %= fs->fs_ipg;
2016 		if (isclr(inosused, ipref))
2017 			goto gotit;
2018 	}
2019 	start = cgp->cg_irotor / NBBY;
2020 	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2021 	loc = memcchr(&inosused[start], 0xff, len);
2022 	if (loc == NULL) {
2023 		len = start + 1;
2024 		start = 0;
2025 		loc = memcchr(&inosused[start], 0xff, len);
2026 		if (loc == NULL) {
2027 			printf("cg = %d, irotor = %ld, fs = %s\n",
2028 			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2029 			panic("ffs_nodealloccg: map corrupted");
2030 			/* NOTREACHED */
2031 		}
2032 	}
2033 	ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2034 gotit:
2035 	/*
2036 	 * Check to see if we need to initialize more inodes.
2037 	 */
2038 	if (fs->fs_magic == FS_UFS2_MAGIC &&
2039 	    ipref + INOPB(fs) > cgp->cg_initediblk &&
2040 	    cgp->cg_initediblk < cgp->cg_niblk) {
2041 		old_initediblk = cgp->cg_initediblk;
2042 
2043 		/*
2044 		 * Release the cylinder group lock before writing the
2045 		 * initialized inode block.  Entering
2046 		 * babarrierwrite() while holding the cylinder group
2047 		 * lock causes a lock order violation between that
2048 		 * lock and snaplk.
2049 		 *
2050 		 * Another thread can decide to initialize the same
2051 		 * inode block, but whichever thread first gets the
2052 		 * cylinder group lock after writing the newly
2053 		 * allocated inode block will update it; the other
2054 		 * thread will realize that it has lost the race and
2055 		 * leave the cylinder group unchanged.
2056 		 */
2057 		ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2058 		brelse(bp);
2059 		if (ibp == NULL) {
2060 			/*
2061 			 * The inode block buffer is already owned by
2062 			 * another thread, which must initialize it.
2063 			 * Wait on the buffer, with the cg buffer lock
2064 			 * dropped, to allow that thread to finish its
2065 			 * updates, then retry.
2066 			 */
2067 			ibp = getinobuf(ip, cg, old_initediblk, 0);
2068 			brelse(ibp);
2069 			UFS_LOCK(ump);
2070 			goto check_nifree;
2071 		}
2072 		bzero(ibp->b_data, (int)fs->fs_bsize);
2073 		dp2 = (struct ufs2_dinode *)(ibp->b_data);
2074 		for (i = 0; i < INOPB(fs); i++) {
2075 			while (dp2->di_gen == 0)
2076 				dp2->di_gen = arc4random();
2077 			dp2++;
2078 		}
2079 
2080 		/*
2081 		 * Rather than adding a soft updates dependency to ensure
2082 		 * that the new inode block is written before it is claimed
2083 		 * by the cylinder group map, we just do a barrier write
2084 		 * here. The barrier write will ensure that the inode block
2085 		 * gets written before the updated cylinder group map can be
2086 		 * written. The barrier write should only slow down bulk
2087 		 * loading of newly created filesystems.
2088 		 */
2089 		if (doasyncinodeinit)
2090 			babarrierwrite(ibp);
2091 		else
2092 			bwrite(ibp);
2093 
2094 		/*
2095 		 * After the inode block is written, try to update the
2096 		 * cg initediblk pointer.  If another thread beat us
2097 		 * to it, then leave it unchanged as the other thread
2098 		 * has already set it correctly.
2099 		 */
2100 		error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp);
2101 		UFS_LOCK(ump);
2102 		ACTIVECLEAR(fs, cg);
2103 		UFS_UNLOCK(ump);
2104 		if (error != 0)
2105 			return (error);
2106 		if (cgp->cg_initediblk == old_initediblk)
2107 			cgp->cg_initediblk += INOPB(fs);
2108 		goto restart;
2109 	}
2110 	cgp->cg_irotor = ipref;
2111 	UFS_LOCK(ump);
2112 	ACTIVECLEAR(fs, cg);
2113 	setbit(inosused, ipref);
2114 	cgp->cg_cs.cs_nifree--;
2115 	fs->fs_cstotal.cs_nifree--;
2116 	fs->fs_cs(fs, cg).cs_nifree--;
2117 	fs->fs_fmod = 1;
2118 	if ((mode & IFMT) == IFDIR) {
2119 		cgp->cg_cs.cs_ndir++;
2120 		fs->fs_cstotal.cs_ndir++;
2121 		fs->fs_cs(fs, cg).cs_ndir++;
2122 	}
2123 	UFS_UNLOCK(ump);
2124 	if (DOINGSOFTDEP(ITOV(ip)))
2125 		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2126 	bdwrite(bp);
2127 	return ((ino_t)(cg * fs->fs_ipg + ipref));
2128 }
2129 
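/*
 * An illustrative userspace sketch of the inosused scan above, kept
 * out of any build with #if 0; find_free_ino is a hypothetical name
 * and the memcchr() byte skip is open-coded here.  A byte of 0xff
 * means all eight inodes it covers are in use, so whole bytes are
 * skipped from the rotor, wrapping once, and ffs(~byte) then finds
 * the first clear bit of the first non-full byte.
 */
#if 0
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NBBY	8

/* Find the first free inode at or after the rotor; -1 if map full. */
static int
find_free_ino(const unsigned char *inosused, int nbytes, int rotor)
{
	int start, i;

	start = rotor / NBBY;
	for (i = start; i < nbytes; i++)	/* forward scan */
		if (inosused[i] != 0xff)
			return (i * NBBY + ffs(~inosused[i] & 0xff) - 1);
	for (i = 0; i < start; i++)		/* wrapped scan */
		if (inosused[i] != 0xff)
			return (i * NBBY + ffs(~inosused[i] & 0xff) - 1);
	return (-1);		/* the kernel panics: map corrupted */
}

int
main(void)
{
	unsigned char inosused[] = { 0xff, 0xff, 0xbf, 0xff };

	printf("%d\n", find_free_ino(inosused, 4, 8));	/* prints 22 */
	return (0);
}
#endif
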
2130 /*
2131  * Free a block or fragment.
2132  *
2133  * The specified block or fragment is placed back in the
2134  * free map. If a fragment is deallocated, a check is made
2135  * to see whether the freed fragments reassemble a full block.
2136  */
2137 static void
2138 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2139 	struct ufsmount *ump;
2140 	struct fs *fs;
2141 	struct vnode *devvp;
2142 	ufs2_daddr_t bno;
2143 	long size;
2144 	ino_t inum;
2145 	struct workhead *dephd;
2146 {
2147 	struct mount *mp;
2148 	struct cg *cgp;
2149 	struct buf *bp;
2150 	ufs1_daddr_t fragno, cgbno;
2151 	int i, blk, frags, bbase, error;
2152 	u_int cg;
2153 	u_int8_t *blksfree;
2154 	struct cdev *dev;
2155 
2156 	cg = dtog(fs, bno);
2157 	if (devvp->v_type == VREG) {
2158 		/* devvp is a snapshot */
2159 		MPASS(devvp->v_mount->mnt_data == ump);
2160 		dev = ump->um_devvp->v_rdev;
2161 	} else if (devvp->v_type == VCHR) {
2162 		/* devvp is a normal disk device */
2163 		dev = devvp->v_rdev;
2164 		ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2165 	} else
2166 		return;
2167 #ifdef INVARIANTS
2168 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2169 	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2170 		printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2171 		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2172 		    size, fs->fs_fsmnt);
2173 		panic("ffs_blkfree_cg: bad size");
2174 	}
2175 #endif
2176 	if ((u_int)bno >= fs->fs_size) {
2177 		printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2178 		    (u_long)inum);
2179 		ffs_fserr(fs, inum, "bad block");
2180 		return;
2181 	}
2182 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2183 		return;
2184 	cgbno = dtogd(fs, bno);
2185 	blksfree = cg_blksfree(cgp);
2186 	UFS_LOCK(ump);
2187 	if (size == fs->fs_bsize) {
2188 		fragno = fragstoblks(fs, cgbno);
2189 		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2190 			if (devvp->v_type == VREG) {
2191 				UFS_UNLOCK(ump);
2192 				/* devvp is a snapshot */
2193 				brelse(bp);
2194 				return;
2195 			}
2196 			printf("dev = %s, block = %jd, fs = %s\n",
2197 			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2198 			panic("ffs_blkfree_cg: freeing free block");
2199 		}
2200 		ffs_setblock(fs, blksfree, fragno);
2201 		ffs_clusteracct(fs, cgp, fragno, 1);
2202 		cgp->cg_cs.cs_nbfree++;
2203 		fs->fs_cstotal.cs_nbfree++;
2204 		fs->fs_cs(fs, cg).cs_nbfree++;
2205 	} else {
2206 		bbase = cgbno - fragnum(fs, cgbno);
2207 		/*
2208 		 * decrement the counts associated with the old frags
2209 		 */
2210 		blk = blkmap(fs, blksfree, bbase);
2211 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2212 		/*
2213 		 * deallocate the fragment
2214 		 */
2215 		frags = numfrags(fs, size);
2216 		for (i = 0; i < frags; i++) {
2217 			if (isset(blksfree, cgbno + i)) {
2218 				printf("dev = %s, block = %jd, fs = %s\n",
2219 				    devtoname(dev), (intmax_t)(bno + i),
2220 				    fs->fs_fsmnt);
2221 				panic("ffs_blkfree_cg: freeing free frag");
2222 			}
2223 			setbit(blksfree, cgbno + i);
2224 		}
2225 		cgp->cg_cs.cs_nffree += i;
2226 		fs->fs_cstotal.cs_nffree += i;
2227 		fs->fs_cs(fs, cg).cs_nffree += i;
2228 		/*
2229 		 * add back in counts associated with the new frags
2230 		 */
2231 		blk = blkmap(fs, blksfree, bbase);
2232 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2233 		/*
2234 		 * if a complete block has been reassembled, account for it
2235 		 */
2236 		fragno = fragstoblks(fs, bbase);
2237 		if (ffs_isblock(fs, blksfree, fragno)) {
2238 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
2239 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2240 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2241 			ffs_clusteracct(fs, cgp, fragno, 1);
2242 			cgp->cg_cs.cs_nbfree++;
2243 			fs->fs_cstotal.cs_nbfree++;
2244 			fs->fs_cs(fs, cg).cs_nbfree++;
2245 		}
2246 	}
2247 	fs->fs_fmod = 1;
2248 	ACTIVECLEAR(fs, cg);
2249 	UFS_UNLOCK(ump);
2250 	mp = UFSTOVFS(ump);
2251 	if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
2252 		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2253 		    numfrags(fs, size), dephd);
2254 	bdwrite(bp);
2255 }
2256 
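/*
 * An illustrative userspace sketch of the reassembly step above, kept
 * out of any build with #if 0, using a hypothetical one-byte block
 * map (fs_frag = 8, so a fully free block reads 0xff); free_frags and
 * struct counts are assumed names.  Freeing fragments sets their
 * bits; once every bit in the block is set, the per-fragment count is
 * rolled back into one whole free block.
 */
#if 0
#include <stdio.h>

#define FS_FRAG	8

struct counts {
	int nffree;	/* free fragments */
	int nbfree;	/* free full blocks */
};

static void
free_frags(unsigned char *blk, int off, int n, struct counts *cs)
{
	int i;

	for (i = 0; i < n; i++)
		*blk |= 1 << (off + i);	/* mark each fragment free */
	cs->nffree += n;
	if (*blk == 0xff) {		/* whole block reassembled */
		cs->nffree -= FS_FRAG;
		cs->nbfree++;
	}
}

int
main(void)
{
	unsigned char blk = 0xf0;	/* frags 4..7 already free */
	struct counts cs = { 4, 0 };

	free_frags(&blk, 0, 4, &cs);	/* free frags 0..3 */
	printf("nffree=%d nbfree=%d\n", cs.nffree, cs.nbfree);	/* 0, 1 */
	return (0);
}
#endif
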
2257 struct ffs_blkfree_trim_params {
2258 	struct task task;
2259 	struct ufsmount *ump;
2260 	struct vnode *devvp;
2261 	ufs2_daddr_t bno;
2262 	long size;
2263 	ino_t inum;
2264 	struct workhead *pdephd;
2265 	struct workhead dephd;
2266 };
2267 
2268 static void
2269 ffs_blkfree_trim_task(ctx, pending)
2270 	void *ctx;
2271 	int pending;
2272 {
2273 	struct ffs_blkfree_trim_params *tp;
2274 
2275 	tp = ctx;
2276 	ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2277 	    tp->inum, tp->pdephd);
2278 	vn_finished_secondary_write(UFSTOVFS(tp->ump));
2279 	atomic_add_int(&tp->ump->um_trim_inflight, -1);
2280 	free(tp, M_TEMP);
2281 }
2282 
2283 static void
2284 ffs_blkfree_trim_completed(bp)
2285 	struct buf *bp;
2286 {
2287 	struct ffs_blkfree_trim_params *tp;
2288 
2289 	tp = bp->b_fsprivate1;
2290 	free(bp, M_TEMP);
2291 	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2292 	taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
2293 }
2294 
2295 void
2296 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
2297 	struct ufsmount *ump;
2298 	struct fs *fs;
2299 	struct vnode *devvp;
2300 	ufs2_daddr_t bno;
2301 	long size;
2302 	ino_t inum;
2303 	enum vtype vtype;
2304 	struct workhead *dephd;
2305 {
2306 	struct mount *mp;
2307 	struct buf *bp;
2308 	struct ffs_blkfree_trim_params *tp;
2309 
2310 	/*
2311 	 * Check to see if a snapshot wants to claim the block.
2312 	 * Check that devvp is a normal disk device, not a snapshot,
2313 	 * that it has one or more snapshots associated with it, and
2314 	 * that one of those snapshots wants to claim the block.
2315 	 */
2316 	if (devvp->v_type == VCHR &&
2317 	    (devvp->v_vflag & VV_COPYONWRITE) &&
2318 	    ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2319 		return;
2320 	}
2321 	/*
2322 	 * Nothing to delay if TRIM is disabled, or the operation is
2323 	 * performed on a snapshot.
2324 	 */
2325 	if (!ump->um_candelete || devvp->v_type == VREG) {
2326 		ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2327 		return;
2328 	}
2329 
2330 	/*
2331 	 * Postpone setting the free bit in the cg bitmap until the
2332 	 * BIO_DELETE has completed.  Otherwise, due to disk queue
2333 	 * reordering, TRIM might be issued after we reuse the block
2334 	 * and write some new data into it.
2335 	 */
2336 	atomic_add_int(&ump->um_trim_inflight, 1);
2337 	tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
2338 	tp->ump = ump;
2339 	tp->devvp = devvp;
2340 	tp->bno = bno;
2341 	tp->size = size;
2342 	tp->inum = inum;
2343 	if (dephd != NULL) {
2344 		LIST_INIT(&tp->dephd);
2345 		LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2346 		tp->pdephd = &tp->dephd;
2347 	} else
2348 		tp->pdephd = NULL;
2349 
2350 	bp = malloc(sizeof(*bp), M_TEMP, M_WAITOK | M_ZERO);
2351 	bp->b_iocmd = BIO_DELETE;
2352 	bp->b_iooffset = dbtob(fsbtodb(fs, bno));
2353 	bp->b_iodone = ffs_blkfree_trim_completed;
2354 	bp->b_bcount = size;
2355 	bp->b_fsprivate1 = tp;
2356 
2357 	mp = UFSTOVFS(ump);
2358 	vn_start_secondary_write(NULL, &mp, 0);
2359 	g_vfs_strategy(ump->um_bo, bp);
2360 }
2361 
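/*
 * An illustrative userspace sketch of the deferral pattern above,
 * kept out of any build with #if 0 and reduced to a synchronous
 * model; all names here are hypothetical.  The free is packaged into
 * a parameter block, the (simulated) BIO_DELETE completion hands that
 * block to a task, and only the task marks the blocks free, so the
 * bitmap never shows the space free before the device has finished
 * the TRIM.  In the kernel the completion runs from the b_iodone hook
 * and the task on the ump->um_trim_tq taskqueue.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct trim_params {
	long bno;
	long size;
	void (*task)(struct trim_params *);
};

static void
blkfree_task(struct trim_params *tp)
{
	/* Stands in for ffs_blkfree_cg() run from the trim taskqueue. */
	printf("freeing %ld bytes at block %ld\n", tp->size, tp->bno);
	free(tp);
}

static void
delete_completed(struct trim_params *tp)
{
	/* Completion handler: only now is it safe to free the blocks. */
	tp->task(tp);
}

static void
issue_delete(long bno, long size)
{
	struct trim_params *tp;

	tp = malloc(sizeof(*tp));
	if (tp == NULL)
		abort();
	tp->bno = bno;
	tp->size = size;
	tp->task = blkfree_task;
	/* A real device would complete this asynchronously. */
	delete_completed(tp);
}

int
main(void)
{
	issue_delete(12345, 32768);
	return (0);
}
#endif
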
2362 #ifdef INVARIANTS
2363 /*
2364  * Verify allocation of a block or fragment. Returns true if the block
2365  * or fragment is allocated, false if it is free.
2366  */
2367 static int
2368 ffs_checkblk(ip, bno, size)
2369 	struct inode *ip;
2370 	ufs2_daddr_t bno;
2371 	long size;
2372 {
2373 	struct fs *fs;
2374 	struct cg *cgp;
2375 	struct buf *bp;
2376 	ufs1_daddr_t cgbno;
2377 	int i, error, frags, free;
2378 	u_int8_t *blksfree;
2379 
2380 	fs = ITOFS(ip);
2381 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2382 		printf("bsize = %ld, size = %ld, fs = %s\n",
2383 		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
2384 		panic("ffs_checkblk: bad size");
2385 	}
2386 	if ((u_int)bno >= fs->fs_size)
2387 		panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2388 	error = ffs_getcg(fs, ITODEVVP(ip), dtog(fs, bno), &bp, &cgp);
2389 	if (error)
2390 		panic("ffs_checkblk: cylinder group read failed");
2391 	blksfree = cg_blksfree(cgp);
2392 	cgbno = dtogd(fs, bno);
2393 	if (size == fs->fs_bsize) {
2394 		free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2395 	} else {
2396 		frags = numfrags(fs, size);
2397 		for (free = 0, i = 0; i < frags; i++)
2398 			if (isset(blksfree, cgbno + i))
2399 				free++;
2400 		if (free != 0 && free != frags)
2401 			panic("ffs_checkblk: partially free fragment");
2402 	}
2403 	brelse(bp);
2404 	return (!free);
2405 }
2406 #endif /* INVARIANTS */
2407 
2408 /*
2409  * Free an inode.
2410  */
2411 int
2412 ffs_vfree(pvp, ino, mode)
2413 	struct vnode *pvp;
2414 	ino_t ino;
2415 	int mode;
2416 {
2417 	struct ufsmount *ump;
2418 
2419 	if (DOINGSOFTDEP(pvp)) {
2420 		softdep_freefile(pvp, ino, mode);
2421 		return (0);
2422 	}
2423 	ump = VFSTOUFS(pvp->v_mount);
2424 	return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
2425 }
2426 
2427 /*
2428  * Do the actual free operation.
2429  * The specified inode is placed back in the free map.
2430  */
2431 int
2432 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2433 	struct ufsmount *ump;
2434 	struct fs *fs;
2435 	struct vnode *devvp;
2436 	ino_t ino;
2437 	int mode;
2438 	struct workhead *wkhd;
2439 {
2440 	struct cg *cgp;
2441 	struct buf *bp;
2442 	int error;
2443 	u_int cg;
2444 	u_int8_t *inosused;
2445 	struct cdev *dev;
2446 
2447 	cg = ino_to_cg(fs, ino);
2448 	if (devvp->v_type == VREG) {
2449 		/* devvp is a snapshot */
2450 		MPASS(devvp->v_mount->mnt_data == ump);
2451 		dev = ump->um_devvp->v_rdev;
2452 	} else if (devvp->v_type == VCHR) {
2453 		/* devvp is a normal disk device */
2454 		dev = devvp->v_rdev;
2455 	} else {
2456 		bp = NULL;
2457 		return (0);
2458 	}
2459 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2460 		panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2461 		    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2462 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2463 		return (error);
2464 	inosused = cg_inosused(cgp);
2465 	ino %= fs->fs_ipg;
2466 	if (isclr(inosused, ino)) {
2467 		printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2468 		    (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
2469 		if (fs->fs_ronly == 0)
2470 			panic("ffs_freefile: freeing free inode");
2471 	}
2472 	clrbit(inosused, ino);
2473 	if (ino < cgp->cg_irotor)
2474 		cgp->cg_irotor = ino;
2475 	cgp->cg_cs.cs_nifree++;
2476 	UFS_LOCK(ump);
2477 	fs->fs_cstotal.cs_nifree++;
2478 	fs->fs_cs(fs, cg).cs_nifree++;
2479 	if ((mode & IFMT) == IFDIR) {
2480 		cgp->cg_cs.cs_ndir--;
2481 		fs->fs_cstotal.cs_ndir--;
2482 		fs->fs_cs(fs, cg).cs_ndir--;
2483 	}
2484 	fs->fs_fmod = 1;
2485 	ACTIVECLEAR(fs, cg);
2486 	UFS_UNLOCK(ump);
2487 	if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
2488 		softdep_setup_inofree(UFSTOVFS(ump), bp,
2489 		    ino + cg * fs->fs_ipg, wkhd);
2490 	bdwrite(bp);
2491 	return (0);
2492 }
2493 
2494 /*
2495  * Check to see if a file is free.
2496  * Used to check for allocated files in snapshots.
2497  */
2498 int
2499 ffs_checkfreefile(fs, devvp, ino)
2500 	struct fs *fs;
2501 	struct vnode *devvp;
2502 	ino_t ino;
2503 {
2504 	struct cg *cgp;
2505 	struct buf *bp;
2506 	int ret, error;
2507 	u_int cg;
2508 	u_int8_t *inosused;
2509 
2510 	cg = ino_to_cg(fs, ino);
2511 	if ((devvp->v_type != VREG) && (devvp->v_type != VCHR))
2512 		return (1);
2513 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2514 		return (1);
2515 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2516 		return (1);
2517 	inosused = cg_inosused(cgp);
2518 	ino %= fs->fs_ipg;
2519 	ret = isclr(inosused, ino);
2520 	brelse(bp);
2521 	return (ret);
2522 }
2523 
2524 /*
2525  * Find a block of the specified size in the specified cylinder group.
2526  *
2527  * It is a panic if a request is made to find a block if none are
2528  * It is a panic if a request is made to find a block when none are
2529  * available.
2530 static ufs1_daddr_t
2531 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2532 	struct fs *fs;
2533 	struct cg *cgp;
2534 	ufs2_daddr_t bpref;
2535 	int allocsiz;
2536 {
2537 	ufs1_daddr_t bno;
2538 	int start, len, loc, i;
2539 	int blk, field, subfield, pos;
2540 	u_int8_t *blksfree;
2541 
2542 	/*
2543 	 * find the fragment by searching through the free block
2544 	 * map for an appropriate bit pattern
2545 	 */
2546 	if (bpref)
2547 		start = dtogd(fs, bpref) / NBBY;
2548 	else
2549 		start = cgp->cg_frotor / NBBY;
2550 	blksfree = cg_blksfree(cgp);
2551 	len = howmany(fs->fs_fpg, NBBY) - start;
2552 	loc = scanc((u_int)len, (u_char *)&blksfree[start],
2553 		fragtbl[fs->fs_frag],
2554 		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2555 	if (loc == 0) {
2556 		len = start + 1;
2557 		start = 0;
2558 		loc = scanc((u_int)len, (u_char *)&blksfree[0],
2559 			fragtbl[fs->fs_frag],
2560 			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2561 		if (loc == 0) {
2562 			printf("start = %d, len = %d, fs = %s\n",
2563 			    start, len, fs->fs_fsmnt);
2564 			panic("ffs_mapsearch: map corrupted");
2565 			/* NOTREACHED */
2566 		}
2567 	}
2568 	bno = (start + len - loc) * NBBY;
2569 	cgp->cg_frotor = bno;
2570 	/*
2571 	 * found the byte in the map
2572 	 * sift through the bits to find the selected frag
2573 	 */
2574 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2575 		blk = blkmap(fs, blksfree, bno);
2576 		blk <<= 1;
2577 		field = around[allocsiz];
2578 		subfield = inside[allocsiz];
2579 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2580 			if ((blk & field) == subfield)
2581 				return (bno + pos);
2582 			field <<= 1;
2583 			subfield <<= 1;
2584 		}
2585 	}
2586 	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2587 	panic("ffs_mapsearch: block not in map");
2588 	return (-1);
2589 }
2590 
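/*
 * An illustrative userspace sketch of the mask trick above, kept out
 * of any build with #if 0; locate_run is a hypothetical name and the
 * masks are built inline under the assumption that they match the
 * ffs_tables.c construction.  For a run size n, around[n] spans the
 * run plus one guard bit on each side and inside[n] is the run itself
 * shifted past the low guard bit; after blk <<= 1 plants a zero guard
 * below the map, a match means exactly n free frags bounded by in-use
 * frags (or by the ends of the block).
 */
#if 0
#include <stdio.h>

#define FS_FRAG	8

static int
locate_run(unsigned blk, int n)
{
	unsigned field, subfield;
	int pos;

	blk <<= 1;				/* low guard bit */
	field = (1u << (n + 2)) - 1;		/* around[n] */
	subfield = ((1u << n) - 1) << 1;	/* inside[n] */
	for (pos = 0; pos <= FS_FRAG - n; pos++) {
		if ((blk & field) == subfield)
			return (pos);		/* frag offset of the run */
		field <<= 1;
		subfield <<= 1;
	}
	return (-1);
}

int
main(void)
{
	/* Frag map 0b01100111: frags 0-2 and 5-6 free; 3, 4, 7 in use. */
	printf("%d\n", locate_run(0x67, 2));	/* prints 5 */
	printf("%d\n", locate_run(0x67, 3));	/* prints 0 */
	return (0);
}
#endif
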
2591 static const struct statfs *
2592 ffs_getmntstat(struct vnode *devvp)
2593 {
2594 
2595 	if (devvp->v_type == VCHR)
2596 		return (&devvp->v_rdev->si_mountpt->mnt_stat);
2597 	return (ffs_getmntstat(VFSTOUFS(devvp->v_mount)->um_devvp));
2598 }
2599 
2600 /*
2601  * Fetch and verify a cylinder group.
2602  */
2603 int
2604 ffs_getcg(fs, devvp, cg, bpp, cgpp)
2605 	struct fs *fs;
2606 	struct vnode *devvp;
2607 	u_int cg;
2608 	struct buf **bpp;
2609 	struct cg **cgpp;
2610 {
2611 	struct buf *bp;
2612 	struct cg *cgp;
2613 	const struct statfs *sfs;
2614 	int flags, error;
2615 
2616 	*bpp = NULL;
2617 	*cgpp = NULL;
2618 	flags = 0;
2619 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2620 		flags |= GB_CKHASH;
2621 	error = breadn_flags(devvp, devvp->v_type == VREG ?
2622 	    fragstoblks(fs, cgtod(fs, cg)) : fsbtodb(fs, cgtod(fs, cg)),
2623 	    (int)fs->fs_cgsize, NULL, NULL, 0, NOCRED, flags,
2624 	    ffs_ckhash_cg, &bp);
2625 	if (error != 0)
2626 		return (error);
2627 	cgp = (struct cg *)bp->b_data;
2628 	if ((fs->fs_metackhash & CK_CYLGRP) != 0 &&
2629 	    (bp->b_flags & B_CKHASH) != 0 &&
2630 	    cgp->cg_ckhash != bp->b_ckhash) {
2631 		sfs = ffs_getmntstat(devvp);
2632 		printf("UFS %s%s (%s) cylinder checksum failed: cg %u, cgp: "
2633 		    "0x%x != bp: 0x%jx\n",
2634 		    devvp->v_type == VCHR ? "" : "snapshot of ",
2635 		    sfs->f_mntfromname, sfs->f_mntonname,
2636 		    cg, cgp->cg_ckhash, (uintmax_t)bp->b_ckhash);
2637 		bp->b_flags &= ~B_CKHASH;
2638 		bp->b_flags |= B_INVAL | B_NOCACHE;
2639 		brelse(bp);
2640 		return (EIO);
2641 	}
2642 	if (!cg_chkmagic(cgp) || cgp->cg_cgx != cg) {
2643 		sfs = ffs_getmntstat(devvp);
2644 		printf("UFS %s%s (%s)",
2645 		    devvp->v_type == VCHR ? "" : "snapshot of ",
2646 		    sfs->f_mntfromname, sfs->f_mntonname);
2647 		if (!cg_chkmagic(cgp))
2648 			printf(" cg %u: bad magic number 0x%x should be 0x%x\n",
2649 			    cg, cgp->cg_magic, CG_MAGIC);
2650 		else
2651 			printf(": wrong cylinder group cg %u != cgx %u\n", cg,
2652 			    cgp->cg_cgx);
2653 		bp->b_flags &= ~B_CKHASH;
2654 		bp->b_flags |= B_INVAL | B_NOCACHE;
2655 		brelse(bp);
2656 		return (EIO);
2657 	}
2658 	bp->b_flags &= ~B_CKHASH;
2659 	bp->b_xflags |= BX_BKGRDWRITE;
2660 	/*
2661 	 * If we are using check hashes on the cylinder group then we want
2662 	 * to limit changing the cylinder group time to when we are actually
2663 	 * going to write it to disk so that its check hash remains correct
2664 	 * in memory. If the CK_CYLGRP flag is set the time is updated in
2665 	 * ffs_bufwrite() as the buffer is queued for writing. Otherwise we
2666 	 * update the time here as we have done historically.
2667 	 */
2668 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2669 		bp->b_xflags |= BX_CYLGRP;
2670 	else
2671 		cgp->cg_old_time = cgp->cg_time = time_second;
2672 	*bpp = bp;
2673 	*cgpp = cgp;
2674 	return (0);
2675 }
2676 
2677 static void
2678 ffs_ckhash_cg(bp)
2679 	struct buf *bp;
2680 {
2681 	uint32_t ckhash;
2682 	struct cg *cgp;
2683 
2684 	cgp = (struct cg *)bp->b_data;
2685 	ckhash = cgp->cg_ckhash;
2686 	cgp->cg_ckhash = 0;
2687 	bp->b_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2688 	cgp->cg_ckhash = ckhash;
2689 }
2690 
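/*
 * An illustrative userspace sketch of the pattern used by
 * ffs_ckhash_cg() above, kept out of any build with #if 0; struct rec
 * and hash32 are hypothetical stand-ins, and a toy FNV-1a replaces
 * calculate_crc32c().  The stored hash field must not influence its
 * own value, so it is zeroed, the whole buffer is hashed, and the
 * field is then restored.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec {
	uint32_t ckhash;	/* stored checksum, like cg_ckhash */
	char payload[60];
};

/* Toy 32-bit FNV-1a; the kernel uses calculate_crc32c(). */
static uint32_t
hash32(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t h = 2166136261u;

	while (len-- > 0)
		h = (h ^ *p++) * 16777619u;
	return (h);
}

static uint32_t
rec_ckhash(struct rec *r)
{
	uint32_t saved, h;

	saved = r->ckhash;
	r->ckhash = 0;			/* exclude the field itself */
	h = hash32(r, sizeof(*r));
	r->ckhash = saved;		/* restore the caller's view */
	return (h);
}

int
main(void)
{
	struct rec r;

	memset(&r, 0, sizeof(r));	/* make padding deterministic */
	strcpy(r.payload, "cylinder group");
	r.ckhash = rec_ckhash(&r);
	printf("valid: %d\n", rec_ckhash(&r) == r.ckhash);	/* 1 */
	return (0);
}
#endif
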
2691 /*
2692  * Fserr prints the name of a filesystem with an error diagnostic.
2693  *
2694  * The form of the error message is:
2695  *	fs: error message
2696  */
2697 void
2698 ffs_fserr(fs, inum, cp)
2699 	struct fs *fs;
2700 	ino_t inum;
2701 	char *cp;
2702 {
2703 	struct thread *td = curthread;	/* XXX */
2704 	struct proc *p = td->td_proc;
2705 
2706 	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
2707 	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
2708 	    fs->fs_fsmnt, cp);
2709 }
2710 
2711 /*
2712  * This function provides the capability for the fsck program to
2713  * update an active filesystem. Fourteen operations are provided:
2714  *
2715  * adjrefcnt(inode, amt) - adjusts the reference count on the
2716  *	specified inode by the specified amount. Under normal
2717  *	operation the count should always go down. Decrementing
2718  *	the count to zero will cause the inode to be freed.
2719  * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2720  *	inode by the specified amount.
2721  * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
2722  *	adjust the superblock summary.
2723  * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2724  *	are marked as free. Inodes should never have to be marked
2725  *	as in use.
2726  * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2727  *	are marked as free. Inodes should never have to be marked
2728  *	as in use.
2729  * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2730  *	are marked as free. Blocks should never have to be marked
2731  *	as in use.
2732  * setflags(flags, set/clear) - the fs_flags field has the specified
2733  *	flags set (second parameter +1) or cleared (second parameter -1).
2734  * setcwd(dirinode) - set the current directory to dirinode in the
2735  *	filesystem associated with the snapshot.
2736  * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2737  *	in the current directory is oldvalue then change it to newvalue.
2738  * unlink(nameptr, oldvalue) - Verify that the inode number associated
2739  *	with nameptr in the current directory is oldvalue then unlink it.
2740  *
2741  * The following functions may only be used on a quiescent filesystem
2742  * by the soft updates journal. They are not safe to be run on an active
2743  * filesystem.
2744  *
2745  * setinode(inode, dip) - the specified disk inode is replaced with the
2746  *	contents pointed to by dip.
2747  * setbufoutput(fd, flags) - output associated with the specified file
2748  *	descriptor (which must reference the character device supporting
2749  *	the filesystem) switches from using physio to running through the
2750  *	buffer cache when flags is set to 1. The descriptor reverts to
2751  *	physio for output when flags is set to zero.
2752  */
2753 
2754 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2755 
2756 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2757 	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2758 
2759 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2760 	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2761 
2762 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2763 	sysctl_ffs_fsck, "Adjust number of directories");
2764 
2765 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2766 	sysctl_ffs_fsck, "Adjust number of free blocks");
2767 
2768 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2769 	sysctl_ffs_fsck, "Adjust number of free inodes");
2770 
2771 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2772 	sysctl_ffs_fsck, "Adjust number of free frags");
2773 
2774 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2775 	sysctl_ffs_fsck, "Adjust number of free clusters");
2776 
2777 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2778 	sysctl_ffs_fsck, "Free Range of Directory Inodes");
2779 
2780 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2781 	sysctl_ffs_fsck, "Free Range of File Inodes");
2782 
2783 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2784 	sysctl_ffs_fsck, "Free Range of Blocks");
2785 
2786 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2787 	sysctl_ffs_fsck, "Change Filesystem Flags");
2788 
2789 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2790 	sysctl_ffs_fsck, "Set Current Working Directory");
2791 
2792 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2793 	sysctl_ffs_fsck, "Change Value of .. Entry");
2794 
2795 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2796 	sysctl_ffs_fsck, "Unlink a Duplicate Name");
2797 
2798 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2799 	sysctl_ffs_fsck, "Update an On-Disk Inode");
2800 
2801 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2802 	sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2803 
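/*
 * An illustrative userspace sketch of how a program such as fsck_ffs
 * might drive the sysctl nodes above, kept out of any build with
 * #if 0.  struct fsck_cmd and FFS_CMD_VERSION come from
 * <ufs/ffs/fs.h>; handle must be a descriptor open on a file or
 * directory inside the target filesystem (carrying CAP_FSCK rights in
 * capability mode).  The helper name, the mount path, and the inode
 * and delta values are all hypothetical, and this is a sketch of the
 * calling convention, not a copy of fsck's code.
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

/* Adjust the block count of inode ino by delta on the fs at path. */
static void
adjblkcnt(const char *path, int64_t ino, int64_t delta)
{
	struct fsck_cmd cmd = { .version = FFS_CMD_VERSION };

	cmd.handle = open(path, O_RDONLY);
	if (cmd.handle < 0)
		err(1, "%s", path);
	cmd.value = ino;	/* inode to be affected */
	cmd.size = delta;	/* amount to be adjusted */
	if (sysctlbyname("vfs.ffs.adjblkcnt", NULL, NULL,
	    &cmd, sizeof(cmd)) < 0)
		err(1, "adjblkcnt");
}

int
main(void)
{
	adjblkcnt("/mnt", 42, -8);	/* hypothetical inode and delta */
	printf("done\n");
	return (0);
}
#endif
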
2804 #define DEBUG 1
2805 #ifdef DEBUG
2806 static int fsckcmds = 0;
2807 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2808 #endif /* DEBUG */
2809 
2810 static int buffered_write(struct file *, struct uio *, struct ucred *,
2811 	int, struct thread *);
2812 
2813 static int
2814 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2815 {
2816 	struct thread *td = curthread;
2817 	struct fsck_cmd cmd;
2818 	struct ufsmount *ump;
2819 	struct vnode *vp, *dvp, *fdvp;
2820 	struct inode *ip, *dp;
2821 	struct mount *mp;
2822 	struct fs *fs;
2823 	ufs2_daddr_t blkno;
2824 	long blkcnt, blksize;
2825 	struct file *fp, *vfp;
2826 	cap_rights_t rights;
2827 	int filetype, error;
2828 	static struct fileops *origops, bufferedops;
2829 
2830 	if (req->newlen > sizeof cmd)
2831 		return (EBADRPC);
2832 	if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2833 		return (error);
2834 	if (cmd.version != FFS_CMD_VERSION)
2835 		return (ERPCMISMATCH);
2836 	if ((error = getvnode(td, cmd.handle,
2837 	    cap_rights_init(&rights, CAP_FSCK), &fp)) != 0)
2838 		return (error);
2839 	vp = fp->f_data;
2840 	if (vp->v_type != VREG && vp->v_type != VDIR) {
2841 		fdrop(fp, td);
2842 		return (EINVAL);
2843 	}
2844 	vn_start_write(vp, &mp, V_WAIT);
2845 	if (mp == NULL ||
2846 	    strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2847 		vn_finished_write(mp);
2848 		fdrop(fp, td);
2849 		return (EINVAL);
2850 	}
2851 	ump = VFSTOUFS(mp);
2852 	if ((mp->mnt_flag & MNT_RDONLY) &&
2853 	    ump->um_fsckpid != td->td_proc->p_pid) {
2854 		vn_finished_write(mp);
2855 		fdrop(fp, td);
2856 		return (EROFS);
2857 	}
2858 	fs = ump->um_fs;
2859 	filetype = IFREG;
2860 
2861 	switch (oidp->oid_number) {
2862 
2863 	case FFS_SET_FLAGS:
2864 #ifdef DEBUG
2865 		if (fsckcmds)
2866 			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2867 			    cmd.size > 0 ? "set" : "clear");
2868 #endif /* DEBUG */
2869 		if (cmd.size > 0)
2870 			fs->fs_flags |= (long)cmd.value;
2871 		else
2872 			fs->fs_flags &= ~(long)cmd.value;
2873 		break;
2874 
2875 	case FFS_ADJ_REFCNT:
2876 #ifdef DEBUG
2877 		if (fsckcmds) {
2878 			printf("%s: adjust inode %jd link count by %jd\n",
2879 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2880 			    (intmax_t)cmd.size);
2881 		}
2882 #endif /* DEBUG */
2883 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2884 			break;
2885 		ip = VTOI(vp);
2886 		ip->i_nlink += cmd.size;
2887 		DIP_SET(ip, i_nlink, ip->i_nlink);
2888 		ip->i_effnlink += cmd.size;
2889 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2890 		error = ffs_update(vp, 1);
2891 		if (DOINGSOFTDEP(vp))
2892 			softdep_change_linkcnt(ip);
2893 		vput(vp);
2894 		break;
2895 
2896 	case FFS_ADJ_BLKCNT:
2897 #ifdef DEBUG
2898 		if (fsckcmds) {
2899 			printf("%s: adjust inode %jd block count by %jd\n",
2900 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2901 			    (intmax_t)cmd.size);
2902 		}
2903 #endif /* DEBUG */
2904 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2905 			break;
2906 		ip = VTOI(vp);
2907 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2908 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2909 		error = ffs_update(vp, 1);
2910 		vput(vp);
2911 		break;
2912 
2913 	case FFS_DIR_FREE:
2914 		filetype = IFDIR;
2915 		/* fall through */
2916 
2917 	case FFS_FILE_FREE:
2918 #ifdef DEBUG
2919 		if (fsckcmds) {
2920 			if (cmd.size == 1)
2921 				printf("%s: free %s inode %ju\n",
2922 				    mp->mnt_stat.f_mntonname,
2923 				    filetype == IFDIR ? "directory" : "file",
2924 				    (uintmax_t)cmd.value);
2925 			else
2926 				printf("%s: free %s inodes %ju-%ju\n",
2927 				    mp->mnt_stat.f_mntonname,
2928 				    filetype == IFDIR ? "directory" : "file",
2929 				    (uintmax_t)cmd.value,
2930 				    (uintmax_t)(cmd.value + cmd.size - 1));
2931 		}
2932 #endif /* DEBUG */
2933 		while (cmd.size > 0) {
2934 			if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2935 			    cmd.value, filetype, NULL)))
2936 				break;
2937 			cmd.size -= 1;
2938 			cmd.value += 1;
2939 		}
2940 		break;
2941 
2942 	case FFS_BLK_FREE:
2943 #ifdef DEBUG
2944 		if (fsckcmds) {
2945 			if (cmd.size == 1)
2946 				printf("%s: free block %jd\n",
2947 				    mp->mnt_stat.f_mntonname,
2948 				    (intmax_t)cmd.value);
2949 			else
2950 				printf("%s: free blocks %jd-%jd\n",
2951 				    mp->mnt_stat.f_mntonname,
2952 				    (intmax_t)cmd.value,
2953 				    (intmax_t)cmd.value + cmd.size - 1);
2954 		}
2955 #endif /* DEBUG */
2956 		blkno = cmd.value;
2957 		blkcnt = cmd.size;
2958 		blksize = fs->fs_frag - (blkno % fs->fs_frag);
2959 		while (blkcnt > 0) {
2960 			if (blksize > blkcnt)
2961 				blksize = blkcnt;
2962 			ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2963 			    blksize * fs->fs_fsize, UFS_ROOTINO, VDIR, NULL);
2964 			blkno += blksize;
2965 			blkcnt -= blksize;
2966 			blksize = fs->fs_frag;
2967 		}
2968 		break;
2969 
2970 	/*
2971 	 * Adjust superblock summaries.  fsck(8) is expected to
2972 	 * submit deltas when necessary.
2973 	 */
2974 	case FFS_ADJ_NDIR:
2975 #ifdef DEBUG
2976 		if (fsckcmds) {
2977 			printf("%s: adjust number of directories by %jd\n",
2978 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2979 		}
2980 #endif /* DEBUG */
2981 		fs->fs_cstotal.cs_ndir += cmd.value;
2982 		break;
2983 
2984 	case FFS_ADJ_NBFREE:
2985 #ifdef DEBUG
2986 		if (fsckcmds) {
2987 			printf("%s: adjust number of free blocks by %+jd\n",
2988 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2989 		}
2990 #endif /* DEBUG */
2991 		fs->fs_cstotal.cs_nbfree += cmd.value;
2992 		break;
2993 
2994 	case FFS_ADJ_NIFREE:
2995 #ifdef DEBUG
2996 		if (fsckcmds) {
2997 			printf("%s: adjust number of free inodes by %+jd\n",
2998 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2999 		}
3000 #endif /* DEBUG */
3001 		fs->fs_cstotal.cs_nifree += cmd.value;
3002 		break;
3003 
3004 	case FFS_ADJ_NFFREE:
3005 #ifdef DEBUG
3006 		if (fsckcmds) {
3007 			printf("%s: adjust number of free frags by %+jd\n",
3008 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3009 		}
3010 #endif /* DEBUG */
3011 		fs->fs_cstotal.cs_nffree += cmd.value;
3012 		break;
3013 
3014 	case FFS_ADJ_NUMCLUSTERS:
3015 #ifdef DEBUG
3016 		if (fsckcmds) {
3017 			printf("%s: adjust number of free clusters by %+jd\n",
3018 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3019 		}
3020 #endif /* DEBUG */
3021 		fs->fs_cstotal.cs_numclusters += cmd.value;
3022 		break;
3023 
3024 	case FFS_SET_CWD:
3025 #ifdef DEBUG
3026 		if (fsckcmds) {
3027 			printf("%s: set current directory to inode %jd\n",
3028 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3029 		}
3030 #endif /* DEBUG */
3031 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
3032 			break;
3033 		AUDIT_ARG_VNODE1(vp);
3034 		if ((error = change_dir(vp, td)) != 0) {
3035 			vput(vp);
3036 			break;
3037 		}
3038 		VOP_UNLOCK(vp, 0);
3039 		pwd_chdir(td, vp);
3040 		break;
3041 
3042 	case FFS_SET_DOTDOT:
3043 #ifdef DEBUG
3044 		if (fsckcmds) {
3045 			printf("%s: change .. in cwd from %jd to %jd\n",
3046 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3047 			    (intmax_t)cmd.size);
3048 		}
3049 #endif /* DEBUG */
3050 		/*
3051 		 * First we have to get and lock the parent directory
3052 		 * to which ".." points.
3053 		 */
3054 		error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
3055 		if (error)
3056 			break;
3057 		/*
3058 		 * Now we get and lock the child directory containing "..".
3059 		 */
3060 		FILEDESC_SLOCK(td->td_proc->p_fd);
3061 		dvp = td->td_proc->p_fd->fd_cdir;
3062 		FILEDESC_SUNLOCK(td->td_proc->p_fd);
3063 		if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
3064 			vput(fdvp);
3065 			break;
3066 		}
3067 		dp = VTOI(dvp);
3068 		dp->i_offset = 12;	/* XXX mastertemplate.dot_reclen */
3069 		error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3070 		    DT_DIR, 0);
3071 		cache_purge(fdvp);
3072 		cache_purge(dvp);
3073 		vput(dvp);
3074 		vput(fdvp);
3075 		break;
3076 
3077 	case FFS_UNLINK:
3078 #ifdef DEBUG
3079 		if (fsckcmds) {
3080 			char buf[32];
3081 
3082 			if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL))
3083 				strncpy(buf, "Name_too_long", 32);
3084 			printf("%s: unlink %s (inode %jd)\n",
3085 			    mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3086 		}
3087 #endif /* DEBUG */
3088 		/*
3089 		 * kern_unlinkat will do its own start/finish writes and
3090 		 * they do not nest, so drop ours here. Setting mp == NULL
3091 		 * indicates that vn_finished_write is not needed down below.
3092 		 */
3093 		vn_finished_write(mp);
3094 		mp = NULL;
3095 		error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
3096 		    UIO_USERSPACE, (ino_t)cmd.size);
3097 		break;
3098 
3099 	case FFS_SET_INODE:
3100 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3101 			error = EPERM;
3102 			break;
3103 		}
3104 #ifdef DEBUG
3105 		if (fsckcmds) {
3106 			printf("%s: update inode %jd\n",
3107 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3108 		}
3109 #endif /* DEBUG */
3110 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3111 			break;
3112 		AUDIT_ARG_VNODE1(vp);
3113 		ip = VTOI(vp);
3114 		if (I_IS_UFS1(ip))
3115 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3116 			    sizeof(struct ufs1_dinode));
3117 		else
3118 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3119 			    sizeof(struct ufs2_dinode));
3120 		if (error) {
3121 			vput(vp);
3122 			break;
3123 		}
3124 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3125 		error = ffs_update(vp, 1);
3126 		vput(vp);
3127 		break;
3128 
3129 	case FFS_SET_BUFOUTPUT:
3130 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3131 			error = EPERM;
3132 			break;
3133 		}
3134 		if (ITOUMP(VTOI(vp)) != ump) {
3135 			error = EINVAL;
3136 			break;
3137 		}
3138 #ifdef DEBUG
3139 		if (fsckcmds) {
3140 			printf("%s: %s buffered output for descriptor %jd\n",
3141 			    mp->mnt_stat.f_mntonname,
3142 			    cmd.size == 1 ? "enable" : "disable",
3143 			    (intmax_t)cmd.value);
3144 		}
3145 #endif /* DEBUG */
3146 		if ((error = getvnode(td, cmd.value,
3147 		    cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0)
3148 			break;
3149 		if (vfp->f_vnode->v_type != VCHR) {
3150 			fdrop(vfp, td);
3151 			error = EINVAL;
3152 			break;
3153 		}
3154 		if (origops == NULL) {
3155 			origops = vfp->f_ops;
3156 			bcopy((void *)origops, (void *)&bufferedops,
3157 			    sizeof(bufferedops));
3158 			bufferedops.fo_write = buffered_write;
3159 		}
3160 		if (cmd.size == 1)
3161 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3162 			    (uintptr_t)&bufferedops);
3163 		else
3164 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3165 			    (uintptr_t)origops);
3166 		fdrop(vfp, td);
3167 		break;
3168 
3169 	default:
3170 #ifdef DEBUG
3171 		if (fsckcmds) {
3172 			printf("Invalid request %d from fsck\n",
3173 			    oidp->oid_number);
3174 		}
3175 #endif /* DEBUG */
3176 		error = EINVAL;
3177 		break;
3178 
3179 	}
3180 	fdrop(fp, td);
3181 	vn_finished_write(mp);
3182 	return (error);
3183 }
3184 
3185 /*
3186  * Function to switch a descriptor to use the buffer cache to stage
3187  * its I/O. This is needed so that writes to the filesystem device
3188  * will give snapshots a chance to copy modified blocks that they
3189  * need to retain.
3190  */
3191 static int
3192 buffered_write(fp, uio, active_cred, flags, td)
3193 	struct file *fp;
3194 	struct uio *uio;
3195 	struct ucred *active_cred;
3196 	int flags;
3197 	struct thread *td;
3198 {
3199 	struct vnode *devvp, *vp;
3200 	struct inode *ip;
3201 	struct buf *bp;
3202 	struct fs *fs;
3203 	struct filedesc *fdp;
3204 	int error;
3205 	daddr_t lbn;
3206 
3207 	/*
3208 	 * The devvp is associated with the /dev filesystem. To discover
3209 	 * the filesystem with which the device is associated, we depend
3210 	 * on the application setting the current directory to a location
3211 	 * within the filesystem being written. Yes, this is an ugly hack.
3212 	 */
3213 	devvp = fp->f_vnode;
3214 	if (!vn_isdisk(devvp, NULL))
3215 		return (EINVAL);
3216 	fdp = td->td_proc->p_fd;
3217 	FILEDESC_SLOCK(fdp);
3218 	vp = fdp->fd_cdir;
3219 	vref(vp);
3220 	FILEDESC_SUNLOCK(fdp);
3221 	vn_lock(vp, LK_SHARED | LK_RETRY);
3222 	/*
3223 	 * Check that the current directory vnode indeed belongs to
3224 	 * UFS before trying to dereference UFS-specific v_data fields.
3225 	 */
3226 	if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3227 		vput(vp);
3228 		return (EINVAL);
3229 	}
3230 	ip = VTOI(vp);
3231 	if (ITODEVVP(ip) != devvp) {
3232 		vput(vp);
3233 		return (EINVAL);
3234 	}
3235 	fs = ITOFS(ip);
3236 	vput(vp);
3237 	foffset_lock_uio(fp, uio, flags);
3238 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3239 #ifdef DEBUG
3240 	if (fsckcmds) {
3241 		printf("%s: buffered write for block %jd\n",
3242 		    fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3243 	}
3244 #endif /* DEBUG */
3245 	/*
3246 	 * All I/O must be contained within a filesystem block, start on
3247 	 * a fragment boundary, and be a multiple of fragments in length.
3248 	 */
3249 	if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3250 	    fragoff(fs, uio->uio_offset) != 0 ||
3251 	    fragoff(fs, uio->uio_resid) != 0) {
3252 		error = EINVAL;
3253 		goto out;
3254 	}
3255 	lbn = numfrags(fs, uio->uio_offset);
3256 	bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3257 	bp->b_flags |= B_RELBUF;
3258 	if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3259 		brelse(bp);
3260 		goto out;
3261 	}
3262 	error = bwrite(bp);
3263 out:
3264 	VOP_UNLOCK(devvp, 0);
3265 	foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
3266 	return (error);
3267 }
3268
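/*
 * An illustrative userspace sketch of the containment test above,
 * kept out of any build with #if 0, using hypothetical values of
 * fs_bsize = 32768 and fs_fsize = 4096; io_ok is an assumed name.  An
 * (offset, resid) pair is acceptable only if it stays within one
 * filesystem block and both values sit on fragment boundaries.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define BSIZE	32768L	/* fs_bsize (assumed) */
#define FSIZE	4096L	/* fs_fsize (assumed) */

static bool
io_ok(long offset, long resid)
{
	if (resid > BSIZE - (offset % BSIZE))
		return (false);		/* crosses a block boundary */
	if (offset % FSIZE != 0 || resid % FSIZE != 0)
		return (false);		/* not fragment aligned */
	return (true);
}

int
main(void)
{
	printf("%d\n", io_ok(28672, 4096));	/* last frag of a block: 1 */
	printf("%d\n", io_ok(28672, 8192));	/* spills into next block: 0 */
	return (0);
}
#endif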