1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2002 Networks Associates Technology, Inc.
5  * All rights reserved.
6  *
7  * This software was developed for the FreeBSD Project by Marshall
8  * Kirk McKusick and Network Associates Laboratories, the Security
9  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
10  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
11  * research program
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1982, 1986, 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
62  */
63 
64 #include <sys/cdefs.h>
65 __FBSDID("$FreeBSD$");
66 
67 #include "opt_quota.h"
68 
69 #include <sys/param.h>
70 #include <sys/capsicum.h>
71 #include <sys/systm.h>
72 #include <sys/bio.h>
73 #include <sys/buf.h>
74 #include <sys/conf.h>
75 #include <sys/fcntl.h>
76 #include <sys/file.h>
77 #include <sys/filedesc.h>
78 #include <sys/priv.h>
79 #include <sys/proc.h>
80 #include <sys/vnode.h>
81 #include <sys/mount.h>
82 #include <sys/kernel.h>
83 #include <sys/syscallsubr.h>
84 #include <sys/sysctl.h>
85 #include <sys/syslog.h>
86 #include <sys/taskqueue.h>
87 
88 #include <security/audit/audit.h>
89 
90 #include <geom/geom.h>
91 
92 #include <ufs/ufs/dir.h>
93 #include <ufs/ufs/extattr.h>
94 #include <ufs/ufs/quota.h>
95 #include <ufs/ufs/inode.h>
96 #include <ufs/ufs/ufs_extern.h>
97 #include <ufs/ufs/ufsmount.h>
98 
99 #include <ufs/ffs/fs.h>
100 #include <ufs/ffs/ffs_extern.h>
101 #include <ufs/ffs/softdep.h>
102 
103 typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
104 				  int size, int rsize);
105 
106 static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
107 static ufs2_daddr_t
108 	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
109 static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
110 		    struct vnode *, ufs2_daddr_t, long, ino_t,
111 		    struct workhead *);
112 static void	ffs_blkfree_trim_completed(struct bio *);
113 static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
114 #ifdef INVARIANTS
115 static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
116 #endif
117 static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
118 static ino_t	ffs_dirpref(struct inode *);
119 static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
120 		    int, int);
121 static ufs2_daddr_t	ffs_hashalloc
122 		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
123 static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
124 		    int);
125 static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
126 static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
127 static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
128 static void	ffs_ckhash_cg(struct buf *);
129 
130 /*
131  * Allocate a block in the filesystem.
132  *
133  * The size of the requested block is given, which must be some
134  * multiple of fs_fsize and <= fs_bsize.
135  * A preference may be optionally specified. If a preference is given
136  * the following hierarchy is used to allocate a block:
137  *   1) allocate the requested block.
138  *   2) allocate a rotationally optimal block in the same cylinder.
139  *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
141  *      available block is located.
142  * If no block preference is given the following hierarchy is used
143  * to allocate a block:
144  *   1) allocate a block in the cylinder group that contains the
145  *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
147  *      available block is located.
148  */
149 int
150 ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
151 	struct inode *ip;
152 	ufs2_daddr_t lbn, bpref;
153 	int size, flags;
154 	struct ucred *cred;
155 	ufs2_daddr_t *bnp;
156 {
157 	struct fs *fs;
158 	struct ufsmount *ump;
159 	ufs2_daddr_t bno;
160 	u_int cg, reclaimed;
161 	static struct timeval lastfail;
162 	static int curfail;
163 	int64_t delta;
164 #ifdef QUOTA
165 	int error;
166 #endif
167 
168 	*bnp = 0;
169 	ump = ITOUMP(ip);
170 	fs = ump->um_fs;
171 	mtx_assert(UFS_MTX(ump), MA_OWNED);
172 #ifdef INVARIANTS
173 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
174 		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
175 		    devtoname(ump->um_dev), (long)fs->fs_bsize, size,
176 		    fs->fs_fsmnt);
177 		panic("ffs_alloc: bad size");
178 	}
179 	if (cred == NOCRED)
180 		panic("ffs_alloc: missing credential");
181 #endif /* INVARIANTS */
182 	reclaimed = 0;
183 retry:
184 #ifdef QUOTA
185 	UFS_UNLOCK(ump);
186 	error = chkdq(ip, btodb(size), cred, 0);
187 	if (error)
188 		return (error);
189 	UFS_LOCK(ump);
190 #endif
191 	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
192 		goto nospace;
193 	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
194 	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
195 		goto nospace;
196 	if (bpref >= fs->fs_size)
197 		bpref = 0;
198 	if (bpref == 0)
199 		cg = ino_to_cg(fs, ip->i_number);
200 	else
201 		cg = dtog(fs, bpref);
202 	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
203 	if (bno > 0) {
204 		delta = btodb(size);
205 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
206 		if (flags & IO_EXT)
207 			ip->i_flag |= IN_CHANGE;
208 		else
209 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
210 		*bnp = bno;
211 		return (0);
212 	}
213 nospace:
214 #ifdef QUOTA
215 	UFS_UNLOCK(ump);
216 	/*
217 	 * Restore user's disk quota because allocation failed.
218 	 */
219 	(void) chkdq(ip, -btodb(size), cred, FORCE);
220 	UFS_LOCK(ump);
221 #endif
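	/*
	 * Initial allocation attempt failed.  If this is the first failure
	 * and the caller does not hold a locked buffer, request that dirty
	 * blocks be flushed to reclaim space, then retry the allocation once.
	 */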
222 	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
223 		reclaimed = 1;
224 		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
225 		goto retry;
226 	}
227 	UFS_UNLOCK(ump);
228 	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
229 		ffs_fserr(fs, ip->i_number, "filesystem full");
230 		uprintf("\n%s: write failed, filesystem is full\n",
231 		    fs->fs_fsmnt);
232 	}
233 	return (ENOSPC);
234 }
235 
236 /*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
241  * the original block. Failing that, the regular block allocator is
242  * invoked to get an appropriate block.
243  */
244 int
245 ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
246 	struct inode *ip;
247 	ufs2_daddr_t lbprev;
248 	ufs2_daddr_t bprev;
249 	ufs2_daddr_t bpref;
250 	int osize, nsize, flags;
251 	struct ucred *cred;
252 	struct buf **bpp;
253 {
254 	struct vnode *vp;
255 	struct fs *fs;
256 	struct buf *bp;
257 	struct ufsmount *ump;
258 	u_int cg, request, reclaimed;
259 	int error, gbflags;
260 	ufs2_daddr_t bno;
261 	static struct timeval lastfail;
262 	static int curfail;
263 	int64_t delta;
264 
265 	vp = ITOV(ip);
266 	ump = ITOUMP(ip);
267 	fs = ump->um_fs;
268 	bp = NULL;
269 	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
270 
271 	mtx_assert(UFS_MTX(ump), MA_OWNED);
272 #ifdef INVARIANTS
273 	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
274 		panic("ffs_realloccg: allocation on suspended filesystem");
275 	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
276 	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
277 		printf(
278 		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
279 		    devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
280 		    nsize, fs->fs_fsmnt);
281 		panic("ffs_realloccg: bad size");
282 	}
283 	if (cred == NOCRED)
284 		panic("ffs_realloccg: missing credential");
285 #endif /* INVARIANTS */
286 	reclaimed = 0;
287 retry:
288 	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
289 	    freespace(fs, fs->fs_minfree) -  numfrags(fs, nsize - osize) < 0) {
290 		goto nospace;
291 	}
292 	if (bprev == 0) {
293 		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
294 		    devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
295 		    fs->fs_fsmnt);
296 		panic("ffs_realloccg: bad bprev");
297 	}
298 	UFS_UNLOCK(ump);
299 	/*
300 	 * Allocate the extra space in the buffer.
301 	 */
302 	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
303 	if (error) {
304 		brelse(bp);
305 		return (error);
306 	}
307 
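	/*
	 * If the buffer has not yet been mapped to a physical block (its
	 * device block number still equals its logical block number), use
	 * the caller-supplied previous allocation.  Fragments occur only
	 * in direct blocks, hence the range check on lbprev.
	 */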
308 	if (bp->b_blkno == bp->b_lblkno) {
309 		if (lbprev >= UFS_NDADDR)
310 			panic("ffs_realloccg: lbprev out of range");
311 		bp->b_blkno = fsbtodb(fs, bprev);
312 	}
313 
314 #ifdef QUOTA
315 	error = chkdq(ip, btodb(nsize - osize), cred, 0);
316 	if (error) {
317 		brelse(bp);
318 		return (error);
319 	}
320 #endif
321 	/*
322 	 * Check for extension in the existing location.
323 	 */
324 	*bpp = NULL;
325 	cg = dtog(fs, bprev);
326 	UFS_LOCK(ump);
327 	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
328 	if (bno) {
329 		if (bp->b_blkno != fsbtodb(fs, bno))
330 			panic("ffs_realloccg: bad blockno");
331 		delta = btodb(nsize - osize);
332 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
333 		if (flags & IO_EXT)
334 			ip->i_flag |= IN_CHANGE;
335 		else
336 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
337 		allocbuf(bp, nsize);
338 		bp->b_flags |= B_DONE;
339 		vfs_bio_bzero_buf(bp, osize, nsize - osize);
340 		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
341 			vfs_bio_set_valid(bp, osize, nsize - osize);
342 		*bpp = bp;
343 		return (0);
344 	}
345 	/*
346 	 * Allocate a new disk location.
347 	 */
348 	if (bpref >= fs->fs_size)
349 		bpref = 0;
350 	switch ((int)fs->fs_optim) {
351 	case FS_OPTSPACE:
352 		/*
353 		 * Allocate an exact sized fragment. Although this makes
354 		 * best use of space, we will waste time relocating it if
355 		 * the file continues to grow. If the fragmentation is
356 		 * less than half of the minimum free reserve, we choose
357 		 * to begin optimizing for time.
358 		 */
359 		request = nsize;
360 		if (fs->fs_minfree <= 5 ||
361 		    fs->fs_cstotal.cs_nffree >
362 		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
363 			break;
364 		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
365 			fs->fs_fsmnt);
366 		fs->fs_optim = FS_OPTTIME;
367 		break;
368 	case FS_OPTTIME:
369 		/*
370 		 * At this point we have discovered a file that is trying to
371 		 * grow a small fragment to a larger fragment. To save time,
372 		 * we allocate a full sized block, then free the unused portion.
373 		 * If the file continues to grow, the `ffs_fragextend' call
374 		 * above will be able to grow it in place without further
375 		 * copying. If aberrant programs cause disk fragmentation to
376 		 * grow within 2% of the free reserve, we choose to begin
377 		 * optimizing for space.
378 		 */
379 		request = fs->fs_bsize;
380 		if (fs->fs_cstotal.cs_nffree <
381 		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
382 			break;
383 		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
384 			fs->fs_fsmnt);
385 		fs->fs_optim = FS_OPTSPACE;
386 		break;
387 	default:
388 		printf("dev = %s, optim = %ld, fs = %s\n",
389 		    devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
390 		panic("ffs_realloccg: bad optim");
391 		/* NOTREACHED */
392 	}
393 	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
394 	if (bno > 0) {
395 		bp->b_blkno = fsbtodb(fs, bno);
396 		if (!DOINGSOFTDEP(vp))
397 			ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
398 			    ip->i_number, vp->v_type, NULL);
399 		delta = btodb(nsize - osize);
400 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
401 		if (flags & IO_EXT)
402 			ip->i_flag |= IN_CHANGE;
403 		else
404 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
405 		allocbuf(bp, nsize);
406 		bp->b_flags |= B_DONE;
407 		vfs_bio_bzero_buf(bp, osize, nsize - osize);
408 		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
409 			vfs_bio_set_valid(bp, osize, nsize - osize);
410 		*bpp = bp;
411 		return (0);
412 	}
413 #ifdef QUOTA
414 	UFS_UNLOCK(ump);
415 	/*
416 	 * Restore user's disk quota because allocation failed.
417 	 */
418 	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
419 	UFS_LOCK(ump);
420 #endif
421 nospace:
422 	/*
423 	 * no space available
424 	 */
425 	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
426 		reclaimed = 1;
427 		UFS_UNLOCK(ump);
428 		if (bp) {
429 			brelse(bp);
430 			bp = NULL;
431 		}
432 		UFS_LOCK(ump);
433 		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
434 		goto retry;
435 	}
436 	UFS_UNLOCK(ump);
437 	if (bp)
438 		brelse(bp);
439 	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
440 		ffs_fserr(fs, ip->i_number, "filesystem full");
441 		uprintf("\n%s: write failed, filesystem is full\n",
442 		    fs->fs_fsmnt);
443 	}
444 	return (ENOSPC);
445 }
446 
447 /*
448  * Reallocate a sequence of blocks into a contiguous sequence of blocks.
449  *
450  * The vnode and an array of buffer pointers for a range of sequential
451  * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * to the end of the allocation for the logical block immediately
454  * preceding the current range. If successful, the physical block numbers
455  * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. Whether
 * the reallocation succeeded is returned. Note that the error
458  * return is not reflected back to the user. Rather the previous block
459  * allocation will be used.
460  */
461 
462 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
463 
464 static int doasyncfree = 1;
465 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
466 "do not force synchronous writes when blocks are reallocated");
467 
468 static int doreallocblks = 1;
469 SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
470 "enable block reallocation");
471 
472 static int maxclustersearch = 10;
473 SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
474 0, "max number of cylinder group to search for contigous blocks");
475 
476 #ifdef DEBUG
477 static volatile int prtrealloc = 0;
478 #endif
479 
480 int
481 ffs_reallocblks(ap)
482 	struct vop_reallocblks_args /* {
483 		struct vnode *a_vp;
484 		struct cluster_save *a_buflist;
485 	} */ *ap;
486 {
487 	struct ufsmount *ump;
488 
489 	/*
490 	 * If the underlying device can do deletes, then skip reallocating
491 	 * the blocks of this file into contiguous sequences. Devices that
492 	 * benefit from BIO_DELETE also benefit from not moving the data.
493 	 * These devices are flash and therefore work less well with this
494 	 * optimization. Also skip if reallocblks has been disabled globally.
495 	 */
496 	ump = ap->a_vp->v_mount->mnt_data;
497 	if (ump->um_candelete || doreallocblks == 0)
498 		return (ENOSPC);
499 
500 	/*
501 	 * We can't wait in softdep prealloc as it may fsync and recurse
502 	 * here.  Instead we simply fail to reallocate blocks if this
503 	 * rare condition arises.
504 	 */
505 	if (DOINGSOFTDEP(ap->a_vp))
506 		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
507 			return (ENOSPC);
508 	if (ump->um_fstype == UFS1)
509 		return (ffs_reallocblks_ufs1(ap));
510 	return (ffs_reallocblks_ufs2(ap));
511 }
512 
513 static int
514 ffs_reallocblks_ufs1(ap)
515 	struct vop_reallocblks_args /* {
516 		struct vnode *a_vp;
517 		struct cluster_save *a_buflist;
518 	} */ *ap;
519 {
520 	struct fs *fs;
521 	struct inode *ip;
522 	struct vnode *vp;
523 	struct buf *sbp, *ebp;
524 	ufs1_daddr_t *bap, *sbap, *ebap;
525 	struct cluster_save *buflist;
526 	struct ufsmount *ump;
527 	ufs_lbn_t start_lbn, end_lbn;
528 	ufs1_daddr_t soff, newblk, blkno;
529 	ufs2_daddr_t pref;
530 	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
531 	int i, cg, len, start_lvl, end_lvl, ssize;
532 
533 	vp = ap->a_vp;
534 	ip = VTOI(vp);
535 	ump = ITOUMP(ip);
536 	fs = ump->um_fs;
537 	/*
538 	 * If we are not tracking block clusters or if we have less than 4%
539 	 * free blocks left, then do not attempt to cluster. Running with
540 	 * less than 5% free block reserve is not recommended and those that
541 	 * choose to do so do not expect to have good file layout.
542 	 */
543 	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
544 		return (ENOSPC);
545 	buflist = ap->a_buflist;
546 	len = buflist->bs_nchildren;
547 	start_lbn = buflist->bs_children[0]->b_lblkno;
548 	end_lbn = start_lbn + len - 1;
549 #ifdef INVARIANTS
550 	for (i = 0; i < len; i++)
551 		if (!ffs_checkblk(ip,
552 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
553 			panic("ffs_reallocblks: unallocated block 1");
554 	for (i = 1; i < len; i++)
555 		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
556 			panic("ffs_reallocblks: non-logical cluster");
557 	blkno = buflist->bs_children[0]->b_blkno;
558 	ssize = fsbtodb(fs, fs->fs_frag);
559 	for (i = 1; i < len - 1; i++)
560 		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
561 			panic("ffs_reallocblks: non-physical cluster %d", i);
562 #endif
563 	/*
564 	 * If the cluster crosses the boundary for the first indirect
565 	 * block, leave space for the indirect block. Indirect blocks
566 	 * are initially laid out in a position after the last direct
567 	 * block. Block reallocation would usually destroy locality by
568 	 * moving the indirect block out of the way to make room for
569 	 * data blocks if we didn't compensate here. We should also do
570 	 * this for other indirect block boundaries, but it is only
571 	 * important for the first one.
572 	 */
573 	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
574 		return (ENOSPC);
575 	/*
576 	 * If the latest allocation is in a new cylinder group, assume that
577 	 * the filesystem has decided to move and do not force it back to
578 	 * the previous cylinder group.
579 	 */
580 	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
581 	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
582 		return (ENOSPC);
583 	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
584 	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
585 		return (ENOSPC);
586 	/*
587 	 * Get the starting offset and block map for the first block.
588 	 */
589 	if (start_lvl == 0) {
590 		sbap = &ip->i_din1->di_db[0];
591 		soff = start_lbn;
592 	} else {
593 		idp = &start_ap[start_lvl - 1];
594 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
595 			brelse(sbp);
596 			return (ENOSPC);
597 		}
598 		sbap = (ufs1_daddr_t *)sbp->b_data;
599 		soff = idp->in_off;
600 	}
601 	/*
602 	 * If the block range spans two block maps, get the second map.
603 	 */
604 	ebap = NULL;
605 	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
606 		ssize = len;
607 	} else {
608 #ifdef INVARIANTS
609 		if (start_lvl > 0 &&
610 		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
611 			panic("ffs_reallocblk: start == end");
612 #endif
613 		ssize = len - (idp->in_off + 1);
614 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
615 			goto fail;
616 		ebap = (ufs1_daddr_t *)ebp->b_data;
617 	}
618 	/*
619 	 * Find the preferred location for the cluster. If we have not
620 	 * previously failed at this endeavor, then follow our standard
621 	 * preference calculation. If we have failed at it, then pick up
622 	 * where we last ended our search.
623 	 */
624 	UFS_LOCK(ump);
625 	if (ip->i_nextclustercg == -1)
626 		pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
627 	else
628 		pref = cgdata(fs, ip->i_nextclustercg);
629 	/*
630 	 * Search the block map looking for an allocation of the desired size.
631 	 * To avoid wasting too much time, we limit the number of cylinder
632 	 * groups that we will search.
633 	 */
634 	cg = dtog(fs, pref);
635 	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
636 		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
637 			break;
638 		cg += 1;
639 		if (cg >= fs->fs_ncg)
640 			cg = 0;
641 	}
642 	/*
643 	 * If we have failed in our search, record where we gave up for
 * next time. Otherwise, fall back to our usual search criterion.
645 	 */
646 	if (newblk == 0) {
647 		ip->i_nextclustercg = cg;
648 		UFS_UNLOCK(ump);
649 		goto fail;
650 	}
651 	ip->i_nextclustercg = -1;
652 	/*
653 	 * We have found a new contiguous block.
654 	 *
655 	 * First we have to replace the old block pointers with the new
656 	 * block pointers in the inode and indirect blocks associated
657 	 * with the file.
658 	 */
659 #ifdef DEBUG
660 	if (prtrealloc)
661 		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
662 		    (uintmax_t)ip->i_number,
663 		    (intmax_t)start_lbn, (intmax_t)end_lbn);
664 #endif
665 	blkno = newblk;
666 	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
667 		if (i == ssize) {
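			/*
			 * We have crossed into the second block map.  Bias
			 * soff so that soff + i below indexes from the start
			 * of ebap.
			 */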
668 			bap = ebap;
669 			soff = -i;
670 		}
671 #ifdef INVARIANTS
672 		if (!ffs_checkblk(ip,
673 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
674 			panic("ffs_reallocblks: unallocated block 2");
675 		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
676 			panic("ffs_reallocblks: alloc mismatch");
677 #endif
678 #ifdef DEBUG
679 		if (prtrealloc)
680 			printf(" %d,", *bap);
681 #endif
682 		if (DOINGSOFTDEP(vp)) {
683 			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
684 				softdep_setup_allocdirect(ip, start_lbn + i,
685 				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
686 				    buflist->bs_children[i]);
687 			else
688 				softdep_setup_allocindir_page(ip, start_lbn + i,
689 				    i < ssize ? sbp : ebp, soff + i, blkno,
690 				    *bap, buflist->bs_children[i]);
691 		}
692 		*bap++ = blkno;
693 	}
694 	/*
695 	 * Next we must write out the modified inode and indirect blocks.
696 	 * For strict correctness, the writes should be synchronous since
 * the old block values may have been written to disk. In practice
698 	 * they are almost never written, but if we are concerned about
699 	 * strict correctness, the `doasyncfree' flag should be set to zero.
700 	 *
701 	 * The test on `doasyncfree' should be changed to test a flag
702 	 * that shows whether the associated buffers and inodes have
703 	 * been written. The flag should be set when the cluster is
704 	 * started and cleared whenever the buffer or inode is flushed.
705 	 * We can then check below to see if it is set, and do the
706 	 * synchronous write only when it has been cleared.
707 	 */
708 	if (sbap != &ip->i_din1->di_db[0]) {
709 		if (doasyncfree)
710 			bdwrite(sbp);
711 		else
712 			bwrite(sbp);
713 	} else {
714 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
715 		if (!doasyncfree)
716 			ffs_update(vp, 1);
717 	}
718 	if (ssize < len) {
719 		if (doasyncfree)
720 			bdwrite(ebp);
721 		else
722 			bwrite(ebp);
723 	}
724 	/*
725 	 * Last, free the old blocks and assign the new blocks to the buffers.
726 	 */
727 #ifdef DEBUG
728 	if (prtrealloc)
729 		printf("\n\tnew:");
730 #endif
731 	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
732 		if (!DOINGSOFTDEP(vp))
733 			ffs_blkfree(ump, fs, ump->um_devvp,
734 			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
735 			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
736 		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
737 #ifdef INVARIANTS
738 		if (!ffs_checkblk(ip,
739 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
740 			panic("ffs_reallocblks: unallocated block 3");
741 #endif
742 #ifdef DEBUG
743 		if (prtrealloc)
744 			printf(" %d,", blkno);
745 #endif
746 	}
747 #ifdef DEBUG
748 	if (prtrealloc) {
749 		prtrealloc--;
750 		printf("\n");
751 	}
752 #endif
753 	return (0);
754 
755 fail:
756 	if (ssize < len)
757 		brelse(ebp);
758 	if (sbap != &ip->i_din1->di_db[0])
759 		brelse(sbp);
760 	return (ENOSPC);
761 }
762 
763 static int
764 ffs_reallocblks_ufs2(ap)
765 	struct vop_reallocblks_args /* {
766 		struct vnode *a_vp;
767 		struct cluster_save *a_buflist;
768 	} */ *ap;
769 {
770 	struct fs *fs;
771 	struct inode *ip;
772 	struct vnode *vp;
773 	struct buf *sbp, *ebp;
774 	ufs2_daddr_t *bap, *sbap, *ebap;
775 	struct cluster_save *buflist;
776 	struct ufsmount *ump;
777 	ufs_lbn_t start_lbn, end_lbn;
778 	ufs2_daddr_t soff, newblk, blkno, pref;
779 	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
780 	int i, cg, len, start_lvl, end_lvl, ssize;
781 
782 	vp = ap->a_vp;
783 	ip = VTOI(vp);
784 	ump = ITOUMP(ip);
785 	fs = ump->um_fs;
786 	/*
787 	 * If we are not tracking block clusters or if we have less than 4%
788 	 * free blocks left, then do not attempt to cluster. Running with
789 	 * less than 5% free block reserve is not recommended and those that
790 	 * choose to do so do not expect to have good file layout.
791 	 */
792 	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
793 		return (ENOSPC);
794 	buflist = ap->a_buflist;
795 	len = buflist->bs_nchildren;
796 	start_lbn = buflist->bs_children[0]->b_lblkno;
797 	end_lbn = start_lbn + len - 1;
798 #ifdef INVARIANTS
799 	for (i = 0; i < len; i++)
800 		if (!ffs_checkblk(ip,
801 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
802 			panic("ffs_reallocblks: unallocated block 1");
803 	for (i = 1; i < len; i++)
804 		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
805 			panic("ffs_reallocblks: non-logical cluster");
806 	blkno = buflist->bs_children[0]->b_blkno;
807 	ssize = fsbtodb(fs, fs->fs_frag);
808 	for (i = 1; i < len - 1; i++)
809 		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
810 			panic("ffs_reallocblks: non-physical cluster %d", i);
811 #endif
812 	/*
813 	 * If the cluster crosses the boundary for the first indirect
814 	 * block, do not move anything in it. Indirect blocks are
815 	 * usually initially laid out in a position between the data
816 	 * blocks. Block reallocation would usually destroy locality by
817 	 * moving the indirect block out of the way to make room for
818 	 * data blocks if we didn't compensate here. We should also do
819 	 * this for other indirect block boundaries, but it is only
820 	 * important for the first one.
821 	 */
822 	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
823 		return (ENOSPC);
824 	/*
825 	 * If the latest allocation is in a new cylinder group, assume that
826 	 * the filesystem has decided to move and do not force it back to
827 	 * the previous cylinder group.
828 	 */
829 	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
830 	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
831 		return (ENOSPC);
832 	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
833 	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
834 		return (ENOSPC);
835 	/*
836 	 * Get the starting offset and block map for the first block.
837 	 */
838 	if (start_lvl == 0) {
839 		sbap = &ip->i_din2->di_db[0];
840 		soff = start_lbn;
841 	} else {
842 		idp = &start_ap[start_lvl - 1];
843 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
844 			brelse(sbp);
845 			return (ENOSPC);
846 		}
847 		sbap = (ufs2_daddr_t *)sbp->b_data;
848 		soff = idp->in_off;
849 	}
850 	/*
851 	 * If the block range spans two block maps, get the second map.
852 	 */
853 	ebap = NULL;
854 	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
855 		ssize = len;
856 	} else {
857 #ifdef INVARIANTS
858 		if (start_lvl > 0 &&
859 		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
860 			panic("ffs_reallocblk: start == end");
861 #endif
862 		ssize = len - (idp->in_off + 1);
863 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
864 			goto fail;
865 		ebap = (ufs2_daddr_t *)ebp->b_data;
866 	}
867 	/*
868 	 * Find the preferred location for the cluster. If we have not
869 	 * previously failed at this endeavor, then follow our standard
870 	 * preference calculation. If we have failed at it, then pick up
871 	 * where we last ended our search.
872 	 */
873 	UFS_LOCK(ump);
874 	if (ip->i_nextclustercg == -1)
875 		pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
876 	else
877 		pref = cgdata(fs, ip->i_nextclustercg);
878 	/*
879 	 * Search the block map looking for an allocation of the desired size.
880 	 * To avoid wasting too much time, we limit the number of cylinder
881 	 * groups that we will search.
882 	 */
883 	cg = dtog(fs, pref);
884 	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
885 		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
886 			break;
887 		cg += 1;
888 		if (cg >= fs->fs_ncg)
889 			cg = 0;
890 	}
891 	/*
892 	 * If we have failed in our search, record where we gave up for
 * next time. Otherwise, fall back to our usual search criterion.
894 	 */
895 	if (newblk == 0) {
896 		ip->i_nextclustercg = cg;
897 		UFS_UNLOCK(ump);
898 		goto fail;
899 	}
900 	ip->i_nextclustercg = -1;
901 	/*
902 	 * We have found a new contiguous block.
903 	 *
904 	 * First we have to replace the old block pointers with the new
905 	 * block pointers in the inode and indirect blocks associated
906 	 * with the file.
907 	 */
908 #ifdef DEBUG
909 	if (prtrealloc)
910 		printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number,
911 		    (intmax_t)start_lbn, (intmax_t)end_lbn);
912 #endif
913 	blkno = newblk;
914 	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
915 		if (i == ssize) {
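			/*
			 * We have crossed into the second block map.  Bias
			 * soff so that soff + i below indexes from the start
			 * of ebap.
			 */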
916 			bap = ebap;
917 			soff = -i;
918 		}
919 #ifdef INVARIANTS
920 		if (!ffs_checkblk(ip,
921 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
922 			panic("ffs_reallocblks: unallocated block 2");
923 		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
924 			panic("ffs_reallocblks: alloc mismatch");
925 #endif
926 #ifdef DEBUG
927 		if (prtrealloc)
928 			printf(" %jd,", (intmax_t)*bap);
929 #endif
930 		if (DOINGSOFTDEP(vp)) {
931 			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
932 				softdep_setup_allocdirect(ip, start_lbn + i,
933 				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
934 				    buflist->bs_children[i]);
935 			else
936 				softdep_setup_allocindir_page(ip, start_lbn + i,
937 				    i < ssize ? sbp : ebp, soff + i, blkno,
938 				    *bap, buflist->bs_children[i]);
939 		}
940 		*bap++ = blkno;
941 	}
942 	/*
943 	 * Next we must write out the modified inode and indirect blocks.
944 	 * For strict correctness, the writes should be synchronous since
 * the old block values may have been written to disk. In practice
946 	 * they are almost never written, but if we are concerned about
947 	 * strict correctness, the `doasyncfree' flag should be set to zero.
948 	 *
949 	 * The test on `doasyncfree' should be changed to test a flag
950 	 * that shows whether the associated buffers and inodes have
951 	 * been written. The flag should be set when the cluster is
952 	 * started and cleared whenever the buffer or inode is flushed.
953 	 * We can then check below to see if it is set, and do the
954 	 * synchronous write only when it has been cleared.
955 	 */
956 	if (sbap != &ip->i_din2->di_db[0]) {
957 		if (doasyncfree)
958 			bdwrite(sbp);
959 		else
960 			bwrite(sbp);
961 	} else {
962 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
963 		if (!doasyncfree)
964 			ffs_update(vp, 1);
965 	}
966 	if (ssize < len) {
967 		if (doasyncfree)
968 			bdwrite(ebp);
969 		else
970 			bwrite(ebp);
971 	}
972 	/*
973 	 * Last, free the old blocks and assign the new blocks to the buffers.
974 	 */
975 #ifdef DEBUG
976 	if (prtrealloc)
977 		printf("\n\tnew:");
978 #endif
979 	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
980 		if (!DOINGSOFTDEP(vp))
981 			ffs_blkfree(ump, fs, ump->um_devvp,
982 			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
983 			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
984 		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
985 #ifdef INVARIANTS
986 		if (!ffs_checkblk(ip,
987 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
988 			panic("ffs_reallocblks: unallocated block 3");
989 #endif
990 #ifdef DEBUG
991 		if (prtrealloc)
992 			printf(" %jd,", (intmax_t)blkno);
993 #endif
994 	}
995 #ifdef DEBUG
996 	if (prtrealloc) {
997 		prtrealloc--;
998 		printf("\n");
999 	}
1000 #endif
1001 	return (0);
1002 
1003 fail:
1004 	if (ssize < len)
1005 		brelse(ebp);
1006 	if (sbap != &ip->i_din2->di_db[0])
1007 		brelse(sbp);
1008 	return (ENOSPC);
1009 }
1010 
1011 /*
1012  * Allocate an inode in the filesystem.
1013  *
1014  * If allocating a directory, use ffs_dirpref to select the inode.
1015  * If allocating in a directory, the following hierarchy is followed:
1016  *   1) allocate the preferred inode.
1017  *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
1019  *      available inode is located.
1020  * If no inode preference is given the following hierarchy is used
1021  * to allocate an inode:
1022  *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
1024  *      available inode is located.
1025  */
1026 int
1027 ffs_valloc(pvp, mode, cred, vpp)
1028 	struct vnode *pvp;
1029 	int mode;
1030 	struct ucred *cred;
1031 	struct vnode **vpp;
1032 {
1033 	struct inode *pip;
1034 	struct fs *fs;
1035 	struct inode *ip;
1036 	struct timespec ts;
1037 	struct ufsmount *ump;
1038 	ino_t ino, ipref;
1039 	u_int cg;
1040 	int error, error1, reclaimed;
1041 	static struct timeval lastfail;
1042 	static int curfail;
1043 
1044 	*vpp = NULL;
1045 	pip = VTOI(pvp);
1046 	ump = ITOUMP(pip);
1047 	fs = ump->um_fs;
1048 
1049 	UFS_LOCK(ump);
1050 	reclaimed = 0;
1051 retry:
1052 	if (fs->fs_cstotal.cs_nifree == 0)
1053 		goto noinodes;
1054 
1055 	if ((mode & IFMT) == IFDIR)
1056 		ipref = ffs_dirpref(pip);
1057 	else
1058 		ipref = pip->i_number;
1059 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
1060 		ipref = 0;
1061 	cg = ino_to_cg(fs, ipref);
1062 	/*
	 * Track the number of dirs created one after another
	 * in the same cg without intervening files.
1065 	 */
1066 	if ((mode & IFMT) == IFDIR) {
1067 		if (fs->fs_contigdirs[cg] < 255)
1068 			fs->fs_contigdirs[cg]++;
1069 	} else {
1070 		if (fs->fs_contigdirs[cg] > 0)
1071 			fs->fs_contigdirs[cg]--;
1072 	}
1073 	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
1074 					(allocfcn_t *)ffs_nodealloccg);
1075 	if (ino == 0)
1076 		goto noinodes;
1077 	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
1078 	if (error) {
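		/*
		 * The newly allocated inode could not be loaded.  Release it
		 * back to the free pool and, if a forced load succeeds, mark
		 * it modified and drop it before returning the error.
		 */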
1079 		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
1080 		    FFSV_FORCEINSMQ);
1081 		ffs_vfree(pvp, ino, mode);
1082 		if (error1 == 0) {
1083 			ip = VTOI(*vpp);
1084 			if (ip->i_mode)
1085 				goto dup_alloc;
1086 			ip->i_flag |= IN_MODIFIED;
1087 			vput(*vpp);
1088 		}
1089 		return (error);
1090 	}
1091 	ip = VTOI(*vpp);
1092 	if (ip->i_mode) {
1093 dup_alloc:
1094 		printf("mode = 0%o, inum = %ju, fs = %s\n",
1095 		    ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
1096 		panic("ffs_valloc: dup alloc");
1097 	}
1098 	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
1099 		printf("free inode %s/%lu had %ld blocks\n",
1100 		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
1101 		DIP_SET(ip, i_blocks, 0);
1102 	}
1103 	ip->i_flags = 0;
1104 	DIP_SET(ip, i_flags, 0);
1105 	/*
1106 	 * Set up a new generation number for this inode.
1107 	 */
1108 	while (ip->i_gen == 0 || ++ip->i_gen == 0)
1109 		ip->i_gen = arc4random();
1110 	DIP_SET(ip, i_gen, ip->i_gen);
1111 	if (fs->fs_magic == FS_UFS2_MAGIC) {
1112 		vfs_timestamp(&ts);
1113 		ip->i_din2->di_birthtime = ts.tv_sec;
1114 		ip->i_din2->di_birthnsec = ts.tv_nsec;
1115 	}
1116 	ufs_prepare_reclaim(*vpp);
1117 	ip->i_flag = 0;
1118 	(*vpp)->v_vflag = 0;
1119 	(*vpp)->v_type = VNON;
1120 	if (fs->fs_magic == FS_UFS2_MAGIC) {
1121 		(*vpp)->v_op = &ffs_vnodeops2;
1122 		ip->i_flag |= IN_UFS2;
1123 	} else {
1124 		(*vpp)->v_op = &ffs_vnodeops1;
1125 	}
1126 	return (0);
1127 noinodes:
1128 	if (reclaimed == 0) {
1129 		reclaimed = 1;
1130 		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
1131 		goto retry;
1132 	}
1133 	UFS_UNLOCK(ump);
1134 	if (ppsratecheck(&lastfail, &curfail, 1)) {
1135 		ffs_fserr(fs, pip->i_number, "out of inodes");
1136 		uprintf("\n%s: create/symlink failed, no inodes free\n",
1137 		    fs->fs_fsmnt);
1138 	}
1139 	return (ENOSPC);
1140 }
1141 
1142 /*
1143  * Find a cylinder group to place a directory.
1144  *
1145  * The policy implemented by this algorithm is to allocate a
1146  * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for the inodes and data of
 * its files. Restrict the number of directories which may be
1149  * allocated one after another in the same cylinder group
1150  * without intervening allocation of files.
1151  *
1152  * If we allocate a first level directory then force allocation
1153  * in another cylinder group.
1154  */
1155 static ino_t
1156 ffs_dirpref(pip)
1157 	struct inode *pip;
1158 {
1159 	struct fs *fs;
1160 	int cg, prefcg, dirsize, cgsize;
1161 	u_int avgifree, avgbfree, avgndir, curdirsize;
1162 	u_int minifree, minbfree, maxndir;
1163 	u_int mincg, minndir;
1164 	u_int maxcontigdirs;
1165 
1166 	mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
1167 	fs = ITOFS(pip);
1168 
1169 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
1170 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1171 	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
1172 
1173 	/*
1174 	 * Force allocation in another cg if creating a first level dir.
1175 	 */
1176 	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
1177 	if (ITOV(pip)->v_vflag & VV_ROOT) {
1178 		prefcg = arc4random() % fs->fs_ncg;
1179 		mincg = prefcg;
1180 		minndir = fs->fs_ipg;
1181 		for (cg = prefcg; cg < fs->fs_ncg; cg++)
1182 			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1183 			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1184 			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1185 				mincg = cg;
1186 				minndir = fs->fs_cs(fs, cg).cs_ndir;
1187 			}
1188 		for (cg = 0; cg < prefcg; cg++)
1189 			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1190 			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1191 			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1192 				mincg = cg;
1193 				minndir = fs->fs_cs(fs, cg).cs_ndir;
1194 			}
1195 		return ((ino_t)(fs->fs_ipg * mincg));
1196 	}
1197 
1198 	/*
	 * Compute the various limits used for
	 * optimal allocation of a directory inode.
1201 	 */
1202 	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
1203 	minifree = avgifree - avgifree / 4;
1204 	if (minifree < 1)
1205 		minifree = 1;
1206 	minbfree = avgbfree - avgbfree / 4;
1207 	if (minbfree < 1)
1208 		minbfree = 1;
1209 	cgsize = fs->fs_fsize * fs->fs_fpg;
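	/*
	 * Estimate the space consumed by an average directory and its
	 * files, bounded below by the space currently consumed per
	 * directory in this filesystem.  maxcontigdirs is then the number
	 * of directories that the free blocks of a cylinder group can be
	 * expected to accommodate.
	 */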
1210 	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
1211 	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
1212 	if (dirsize < curdirsize)
1213 		dirsize = curdirsize;
1214 	if (dirsize <= 0)
1215 		maxcontigdirs = 0;		/* dirsize overflowed */
1216 	else
1217 		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
1218 	if (fs->fs_avgfpdir > 0)
1219 		maxcontigdirs = min(maxcontigdirs,
1220 				    fs->fs_ipg / fs->fs_avgfpdir);
1221 	if (maxcontigdirs == 0)
1222 		maxcontigdirs = 1;
1223 
1224 	/*
1225 	 * Limit number of dirs in one cg and reserve space for
1226 	 * regular files, but only if we have no deficit in
1227 	 * inodes or space.
1228 	 *
1229 	 * We are trying to find a suitable cylinder group nearby
1230 	 * our preferred cylinder group to place a new directory.
1231 	 * We scan from our preferred cylinder group forward looking
1232 	 * for a cylinder group that meets our criterion. If we get
1233 	 * to the final cylinder group and do not find anything,
1234 	 * we start scanning forwards from the beginning of the
1235 	 * filesystem. While it might seem sensible to start scanning
1236 	 * backwards or even to alternate looking forward and backward,
1237 	 * this approach fails badly when the filesystem is nearly full.
1238 	 * Specifically, we first search all the areas that have no space
1239 	 * and finally try the one preceding that. We repeat this on
1240 	 * every request and in the case of the final block end up
1241 	 * searching the entire filesystem. By jumping to the front
1242 	 * of the filesystem, our future forward searches always look
 * in new cylinder groups so we find every possible block after
1244 	 * one pass over the filesystem.
1245 	 */
1246 	prefcg = ino_to_cg(fs, pip->i_number);
1247 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
1248 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1249 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1250 		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1251 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
1252 				return ((ino_t)(fs->fs_ipg * cg));
1253 		}
1254 	for (cg = 0; cg < prefcg; cg++)
1255 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1256 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1257 		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1258 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
1259 				return ((ino_t)(fs->fs_ipg * cg));
1260 		}
1261 	/*
	 * This is a backstop when we have a deficit in space.
1263 	 */
1264 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
1265 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1266 			return ((ino_t)(fs->fs_ipg * cg));
1267 	for (cg = 0; cg < prefcg; cg++)
1268 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1269 			break;
1270 	return ((ino_t)(fs->fs_ipg * cg));
1271 }
1272 
1273 /*
1274  * Select the desired position for the next block in a file.  The file is
1275  * logically divided into sections. The first section is composed of the
1276  * direct blocks and the next fs_maxbpg blocks. Each additional section
1277  * contains fs_maxbpg blocks.
1278  *
1279  * If no blocks have been allocated in the first section, the policy is to
1280  * request a block in the same cylinder group as the inode that describes
1281  * the file. The first indirect is allocated immediately following the last
1282  * direct block and the data blocks for the first indirect immediately
1283  * follow it.
1284  *
1285  * If no blocks have been allocated in any other section, the indirect
1286  * block(s) are allocated in the same cylinder group as its inode in an
1287  * area reserved immediately following the inode blocks. The policy for
1288  * the data blocks is to place them in a cylinder group with a greater than
1289  * average number of free blocks. An appropriate cylinder group is found
1290  * by using a rotor that sweeps the cylinder groups. When a new group of
1291  * blocks is needed, the sweep begins in the cylinder group following the
1292  * cylinder group from which the previous allocation was made. The sweep
1293  * continues until a cylinder group with greater than the average number
1294  * of free blocks is found. If the allocation is for the first block in an
1295  * indirect block or the previous block is a hole, then the information on
1296  * the previous allocation is unavailable; here a best guess is made based
1297  * on the logical block number being allocated.
1298  *
1299  * If a section is already partially allocated, the policy is to
1300  * allocate blocks contiguously within the section if possible.
1301  */
1302 ufs2_daddr_t
1303 ffs_blkpref_ufs1(ip, lbn, indx, bap)
1304 	struct inode *ip;
1305 	ufs_lbn_t lbn;
1306 	int indx;
1307 	ufs1_daddr_t *bap;
1308 {
1309 	struct fs *fs;
1310 	u_int cg, inocg;
1311 	u_int avgbfree, startcg;
1312 	ufs2_daddr_t pref;
1313 
1314 	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1315 	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1316 	fs = ITOFS(ip);
1317 	/*
1318 	 * Allocation of indirect blocks is indicated by passing negative
1319 	 * values in indx: -1 for single indirect, -2 for double indirect,
1320 	 * -3 for triple indirect. As noted below, we attempt to allocate
1321 	 * the first indirect inline with the file data. For all later
1322 	 * indirect blocks, the data is often allocated in other cylinder
1323 	 * groups. However to speed random file access and to speed up
1324 	 * fsck, the filesystem reserves the first fs_metaspace blocks
1325 	 * (typically half of fs_minfree) of the data area of each cylinder
1326 	 * group to hold these later indirect blocks.
1327 	 */
1328 	inocg = ino_to_cg(fs, ip->i_number);
1329 	if (indx < 0) {
1330 		/*
1331 		 * Our preference for indirect blocks is the zone at the
1332 		 * beginning of the inode's cylinder group data area that
1333 		 * we try to reserve for indirect blocks.
1334 		 */
1335 		pref = cgmeta(fs, inocg);
1336 		/*
1337 		 * If we are allocating the first indirect block, try to
1338 		 * place it immediately following the last direct block.
1339 		 */
1340 		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
1341 		    ip->i_din1->di_db[UFS_NDADDR - 1] != 0)
1342 			pref = ip->i_din1->di_db[UFS_NDADDR - 1] + fs->fs_frag;
1343 		return (pref);
1344 	}
1345 	/*
1346 	 * If we are allocating the first data block in the first indirect
1347 	 * block and the indirect has been allocated in the data block area,
1348 	 * try to place it immediately following the indirect block.
1349 	 */
1350 	if (lbn == UFS_NDADDR) {
1351 		pref = ip->i_din1->di_ib[0];
1352 		if (pref != 0 && pref >= cgdata(fs, inocg) &&
1353 		    pref < cgbase(fs, inocg + 1))
1354 			return (pref + fs->fs_frag);
1355 	}
1356 	/*
1357 	 * If we are at the beginning of a file, or we have already allocated
1358 	 * the maximum number of blocks per cylinder group, or we do not
1359 	 * have a block allocated immediately preceding us, then we need
1360 	 * to decide where to start allocating new blocks.
1361 	 */
1362 	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1363 		/*
1364 		 * If we are allocating a directory data block, we want
1365 		 * to place it in the metadata area.
1366 		 */
1367 		if ((ip->i_mode & IFMT) == IFDIR)
1368 			return (cgmeta(fs, inocg));
1369 		/*
1370 		 * Until we fill all the direct and all the first indirect's
1371 		 * blocks, we try to allocate in the data area of the inode's
1372 		 * cylinder group.
1373 		 */
1374 		if (lbn < UFS_NDADDR + NINDIR(fs))
1375 			return (cgdata(fs, inocg));
1376 		/*
1377 		 * Find a cylinder with greater than average number of
1378 		 * unused data blocks.
1379 		 */
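		/*
		 * With no previous block to go by, start the search at the
		 * cylinder group implied by how far into the file we are
		 * (one group per fs_maxbpg blocks past the inode's group);
		 * otherwise start just past the previous block's group.
		 */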
1380 		if (indx == 0 || bap[indx - 1] == 0)
1381 			startcg = inocg + lbn / fs->fs_maxbpg;
1382 		else
1383 			startcg = dtog(fs, bap[indx - 1]) + 1;
1384 		startcg %= fs->fs_ncg;
1385 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1386 		for (cg = startcg; cg < fs->fs_ncg; cg++)
1387 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1388 				fs->fs_cgrotor = cg;
1389 				return (cgdata(fs, cg));
1390 			}
1391 		for (cg = 0; cg <= startcg; cg++)
1392 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1393 				fs->fs_cgrotor = cg;
1394 				return (cgdata(fs, cg));
1395 			}
1396 		return (0);
1397 	}
1398 	/*
1399 	 * Otherwise, we just always try to lay things out contiguously.
1400 	 */
1401 	return (bap[indx - 1] + fs->fs_frag);
1402 }
1403 
1404 /*
1405  * Same as above, but for UFS2
1406  */
1407 ufs2_daddr_t
1408 ffs_blkpref_ufs2(ip, lbn, indx, bap)
1409 	struct inode *ip;
1410 	ufs_lbn_t lbn;
1411 	int indx;
1412 	ufs2_daddr_t *bap;
1413 {
1414 	struct fs *fs;
1415 	u_int cg, inocg;
1416 	u_int avgbfree, startcg;
1417 	ufs2_daddr_t pref;
1418 
1419 	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1420 	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1421 	fs = ITOFS(ip);
1422 	/*
1423 	 * Allocation of indirect blocks is indicated by passing negative
1424 	 * values in indx: -1 for single indirect, -2 for double indirect,
1425 	 * -3 for triple indirect. As noted below, we attempt to allocate
1426 	 * the first indirect inline with the file data. For all later
1427 	 * indirect blocks, the data is often allocated in other cylinder
1428 	 * groups. However to speed random file access and to speed up
1429 	 * fsck, the filesystem reserves the first fs_metaspace blocks
1430 	 * (typically half of fs_minfree) of the data area of each cylinder
1431 	 * group to hold these later indirect blocks.
1432 	 */
1433 	inocg = ino_to_cg(fs, ip->i_number);
1434 	if (indx < 0) {
1435 		/*
1436 		 * Our preference for indirect blocks is the zone at the
1437 		 * beginning of the inode's cylinder group data area that
1438 		 * we try to reserve for indirect blocks.
1439 		 */
1440 		pref = cgmeta(fs, inocg);
1441 		/*
1442 		 * If we are allocating the first indirect block, try to
1443 		 * place it immediately following the last direct block.
1444 		 */
1445 		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
1446 		    ip->i_din2->di_db[UFS_NDADDR - 1] != 0)
1447 			pref = ip->i_din2->di_db[UFS_NDADDR - 1] + fs->fs_frag;
1448 		return (pref);
1449 	}
1450 	/*
1451 	 * If we are allocating the first data block in the first indirect
1452 	 * block and the indirect has been allocated in the data block area,
1453 	 * try to place it immediately following the indirect block.
1454 	 */
1455 	if (lbn == UFS_NDADDR) {
1456 		pref = ip->i_din2->di_ib[0];
1457 		if (pref != 0 && pref >= cgdata(fs, inocg) &&
1458 		    pref < cgbase(fs, inocg + 1))
1459 			return (pref + fs->fs_frag);
1460 	}
1461 	/*
1462 	 * If we are at the beginning of a file, or we have already allocated
1463 	 * the maximum number of blocks per cylinder group, or we do not
1464 	 * have a block allocated immediately preceding us, then we need
1465 	 * to decide where to start allocating new blocks.
1466 	 */
1467 	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1468 		/*
1469 		 * If we are allocating a directory data block, we want
1470 		 * to place it in the metadata area.
1471 		 */
1472 		if ((ip->i_mode & IFMT) == IFDIR)
1473 			return (cgmeta(fs, inocg));
1474 		/*
1475 		 * Until we fill all the direct and all the first indirect's
1476 		 * blocks, we try to allocate in the data area of the inode's
1477 		 * cylinder group.
1478 		 */
1479 		if (lbn < UFS_NDADDR + NINDIR(fs))
1480 			return (cgdata(fs, inocg));
1481 		/*
1482 		 * Find a cylinder with greater than average number of
1483 		 * unused data blocks.
1484 		 */
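		/*
		 * With no previous block to go by, start the search at the
		 * cylinder group implied by how far into the file we are
		 * (one group per fs_maxbpg blocks past the inode's group);
		 * otherwise start just past the previous block's group.
		 */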
1485 		if (indx == 0 || bap[indx - 1] == 0)
1486 			startcg = inocg + lbn / fs->fs_maxbpg;
1487 		else
1488 			startcg = dtog(fs, bap[indx - 1]) + 1;
1489 		startcg %= fs->fs_ncg;
1490 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1491 		for (cg = startcg; cg < fs->fs_ncg; cg++)
1492 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1493 				fs->fs_cgrotor = cg;
1494 				return (cgdata(fs, cg));
1495 			}
1496 		for (cg = 0; cg <= startcg; cg++)
1497 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1498 				fs->fs_cgrotor = cg;
1499 				return (cgdata(fs, cg));
1500 			}
1501 		return (0);
1502 	}
1503 	/*
1504 	 * Otherwise, we just always try to lay things out contiguously.
1505 	 */
1506 	return (bap[indx - 1] + fs->fs_frag);
1507 }
1508 
1509 /*
1510  * Implement the cylinder overflow algorithm.
1511  *
1512  * The policy implemented by this algorithm is:
1513  *   1) allocate the block in its requested cylinder group.
1514  *   2) quadradically rehash on the cylinder group number.
1515  *   3) brute force search for a free block.
1516  *
1517  * Must be called with the UFS lock held.  Will release the lock on success
1518  * and return with it held on failure.
1519  */
1520 /*VARARGS5*/
1521 static ufs2_daddr_t
1522 ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
1523 	struct inode *ip;
1524 	u_int cg;
1525 	ufs2_daddr_t pref;
1526 	int size;	/* Search size for data blocks, mode for inodes */
1527 	int rsize;	/* Real allocated size. */
1528 	allocfcn_t *allocator;
1529 {
1530 	struct fs *fs;
1531 	ufs2_daddr_t result;
1532 	u_int i, icg = cg;
1533 
1534 	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1535 #ifdef INVARIANTS
1536 	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
1537 		panic("ffs_hashalloc: allocation on suspended filesystem");
1538 #endif
1539 	fs = ITOFS(ip);
1540 	/*
1541 	 * 1: preferred cylinder group
1542 	 */
1543 	result = (*allocator)(ip, cg, pref, size, rsize);
1544 	if (result)
1545 		return (result);
1546 	/*
1547 	 * 2: quadratic rehash
1548 	 */
1549 	for (i = 1; i < fs->fs_ncg; i *= 2) {
1550 		cg += i;
1551 		if (cg >= fs->fs_ncg)
1552 			cg -= fs->fs_ncg;
1553 		result = (*allocator)(ip, cg, 0, size, rsize);
1554 		if (result)
1555 			return (result);
1556 	}
1557 	/*
1558 	 * 3: brute force search
1559 	 * Note that we start at i == 2, since 0 was checked initially,
1560 	 * and 1 is always checked in the quadratic rehash.
1561 	 */
1562 	cg = (icg + 2) % fs->fs_ncg;
1563 	for (i = 2; i < fs->fs_ncg; i++) {
1564 		result = (*allocator)(ip, cg, 0, size, rsize);
1565 		if (result)
1566 			return (result);
1567 		cg++;
1568 		if (cg == fs->fs_ncg)
1569 			cg = 0;
1570 	}
1571 	return (0);
1572 }
1573 
1574 /*
1575  * Determine whether a fragment can be extended.
1576  *
1577  * Check to see if the necessary fragments are available, and
1578  * if they are, allocate them.
1579  */
1580 static ufs2_daddr_t
1581 ffs_fragextend(ip, cg, bprev, osize, nsize)
1582 	struct inode *ip;
1583 	u_int cg;
1584 	ufs2_daddr_t bprev;
1585 	int osize, nsize;
1586 {
1587 	struct fs *fs;
1588 	struct cg *cgp;
1589 	struct buf *bp;
1590 	struct ufsmount *ump;
1591 	int nffree;
1592 	long bno;
1593 	int frags, bbase;
1594 	int i, error;
1595 	u_int8_t *blksfree;
1596 
1597 	ump = ITOUMP(ip);
1598 	fs = ump->um_fs;
1599 	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1600 		return (0);
1601 	frags = numfrags(fs, nsize);
1602 	bbase = fragnum(fs, bprev);
1603 	if (bbase > fragnum(fs, (bprev + frags - 1))) {
1604 		/* cannot extend across a block boundary */
1605 		return (0);
1606 	}
1607 	UFS_UNLOCK(ump);
1608 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0)
1609 		goto fail;
1610 	bno = dtogd(fs, bprev);
1611 	blksfree = cg_blksfree(cgp);
1612 	for (i = numfrags(fs, osize); i < frags; i++)
1613 		if (isclr(blksfree, bno + i))
1614 			goto fail;
1615 	/*
1616 	 * The current fragment can be extended:
1617 	 * deduct the count on the fragment being extended into,
1618 	 * increase the count on the remaining fragment (if any),
1619 	 * and allocate the extended piece.
1620 	 */
1621 	for (i = frags; i < fs->fs_frag - bbase; i++)
1622 		if (isclr(blksfree, bno + i))
1623 			break;
1624 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
1625 	if (i != frags)
1626 		cgp->cg_frsum[i - frags]++;
1627 	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1628 		clrbit(blksfree, bno + i);
1629 		cgp->cg_cs.cs_nffree--;
1630 		nffree++;
1631 	}
1632 	UFS_LOCK(ump);
1633 	fs->fs_cstotal.cs_nffree -= nffree;
1634 	fs->fs_cs(fs, cg).cs_nffree -= nffree;
1635 	fs->fs_fmod = 1;
1636 	ACTIVECLEAR(fs, cg);
1637 	UFS_UNLOCK(ump);
1638 	if (DOINGSOFTDEP(ITOV(ip)))
1639 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1640 		    frags, numfrags(fs, osize));
1641 	bdwrite(bp);
1642 	return (bprev);
1643 
1644 fail:
1645 	brelse(bp);
1646 	UFS_LOCK(ump);
1647 	return (0);
1648 
1649 }
1650 
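/*
 * Illustrative sketch (not part of the kernel source): the in-block
 * availability check performed by ffs_fragextend(), reduced to a
 * stand-alone function over a toy free-fragment map.  One boolean per
 * fragment stands in for the kernel's packed blksfree bitmap, and the
 * fragment counts are assumed to come from numfrags().
 */
#include <stdbool.h>

/*
 * "free_map" has one entry per fragment in the cylinder group (true ==
 * free); "bno" is the first fragment of the existing allocation, and
 * "ofrags"/"nfrags" are the old and requested sizes in fragments.
 */
bool
can_extend_fragment(const bool *free_map, int frags_per_block,
    int bno, int ofrags, int nfrags)
{
	int i;

	/* The extension may not cross into the next block. */
	if (bno % frags_per_block > (bno + nfrags - 1) % frags_per_block)
		return (false);
	/* Every additional fragment must currently be free. */
	for (i = ofrags; i < nfrags; i++)
		if (!free_map[bno + i])
			return (false);
	return (true);
}
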
1651 /*
1652  * Determine whether a block can be allocated.
1653  *
1654  * Check to see if a block of the appropriate size is available,
1655  * and if it is, allocate it.
1656  */
1657 static ufs2_daddr_t
1658 ffs_alloccg(ip, cg, bpref, size, rsize)
1659 	struct inode *ip;
1660 	u_int cg;
1661 	ufs2_daddr_t bpref;
1662 	int size;
1663 	int rsize;
1664 {
1665 	struct fs *fs;
1666 	struct cg *cgp;
1667 	struct buf *bp;
1668 	struct ufsmount *ump;
1669 	ufs1_daddr_t bno;
1670 	ufs2_daddr_t blkno;
1671 	int i, allocsiz, error, frags;
1672 	u_int8_t *blksfree;
1673 
1674 	ump = ITOUMP(ip);
1675 	fs = ump->um_fs;
1676 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1677 		return (0);
1678 	UFS_UNLOCK(ump);
1679 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0 ||
1680 	   (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1681 		goto fail;
1682 	if (size == fs->fs_bsize) {
1683 		UFS_LOCK(ump);
1684 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1685 		ACTIVECLEAR(fs, cg);
1686 		UFS_UNLOCK(ump);
1687 		bdwrite(bp);
1688 		return (blkno);
1689 	}
1690 	/*
1691 	 * check to see if any fragments are already available
1692 	 * allocsiz is the size which will be allocated, hacking
1693 	 * it down to a smaller size if necessary
1694 	 */
1695 	blksfree = cg_blksfree(cgp);
1696 	frags = numfrags(fs, size);
1697 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1698 		if (cgp->cg_frsum[allocsiz] != 0)
1699 			break;
1700 	if (allocsiz == fs->fs_frag) {
1701 		/*
1702 		 * no fragments were available, so a block will be
1703 		 * allocated, and hacked up
1704 		 */
1705 		if (cgp->cg_cs.cs_nbfree == 0)
1706 			goto fail;
1707 		UFS_LOCK(ump);
1708 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1709 		ACTIVECLEAR(fs, cg);
1710 		UFS_UNLOCK(ump);
1711 		bdwrite(bp);
1712 		return (blkno);
1713 	}
1714 	KASSERT(size == rsize,
1715 	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1716 	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1717 	if (bno < 0)
1718 		goto fail;
1719 	for (i = 0; i < frags; i++)
1720 		clrbit(blksfree, bno + i);
1721 	cgp->cg_cs.cs_nffree -= frags;
1722 	cgp->cg_frsum[allocsiz]--;
1723 	if (frags != allocsiz)
1724 		cgp->cg_frsum[allocsiz - frags]++;
1725 	UFS_LOCK(ump);
1726 	fs->fs_cstotal.cs_nffree -= frags;
1727 	fs->fs_cs(fs, cg).cs_nffree -= frags;
1728 	fs->fs_fmod = 1;
1729 	blkno = cgbase(fs, cg) + bno;
1730 	ACTIVECLEAR(fs, cg);
1731 	UFS_UNLOCK(ump);
1732 	if (DOINGSOFTDEP(ITOV(ip)))
1733 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1734 	bdwrite(bp);
1735 	return (blkno);
1736 
1737 fail:
1738 	brelse(bp);
1739 	UFS_LOCK(ump);
1740 	return (0);
1741 }
1742 
1743 /*
1744  * Allocate a block in a cylinder group.
1745  *
1746  * This algorithm implements the following policy:
1747  *   1) allocate the requested block.
1748  *   2) allocate a rotationally optimal block in the same cylinder.
1749  *   3) allocate the next available block on the block rotor for the
1750  *      specified cylinder group.
1751  * Note that this routine only allocates fs_bsize blocks; these
1752  * blocks may be fragmented by the routine that allocates them.
1753  */
1754 static ufs2_daddr_t
1755 ffs_alloccgblk(ip, bp, bpref, size)
1756 	struct inode *ip;
1757 	struct buf *bp;
1758 	ufs2_daddr_t bpref;
1759 	int size;
1760 {
1761 	struct fs *fs;
1762 	struct cg *cgp;
1763 	struct ufsmount *ump;
1764 	ufs1_daddr_t bno;
1765 	ufs2_daddr_t blkno;
1766 	u_int8_t *blksfree;
1767 	int i, cgbpref;
1768 
1769 	ump = ITOUMP(ip);
1770 	fs = ump->um_fs;
1771 	mtx_assert(UFS_MTX(ump), MA_OWNED);
1772 	cgp = (struct cg *)bp->b_data;
1773 	blksfree = cg_blksfree(cgp);
1774 	if (bpref == 0) {
1775 		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1776 	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1777 		/* map bpref to correct zone in this cg */
1778 		if (bpref < cgdata(fs, cgbpref))
1779 			bpref = cgmeta(fs, cgp->cg_cgx);
1780 		else
1781 			bpref = cgdata(fs, cgp->cg_cgx);
1782 	}
1783 	/*
1784 	 * if the requested block is available, use it
1785 	 */
1786 	bno = dtogd(fs, blknum(fs, bpref));
1787 	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1788 		goto gotit;
1789 	/*
1790 	 * Take the next available block in this cylinder group.
1791 	 */
1792 	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1793 	if (bno < 0)
1794 		return (0);
1795 	/* Update cg_rotor only if allocated from the data zone */
1796 	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1797 		cgp->cg_rotor = bno;
1798 gotit:
1799 	blkno = fragstoblks(fs, bno);
1800 	ffs_clrblock(fs, blksfree, (long)blkno);
1801 	ffs_clusteracct(fs, cgp, blkno, -1);
1802 	cgp->cg_cs.cs_nbfree--;
1803 	fs->fs_cstotal.cs_nbfree--;
1804 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1805 	fs->fs_fmod = 1;
1806 	blkno = cgbase(fs, cgp->cg_cgx) + bno;
1807 	/*
1808 	 * If the caller didn't want the whole block free the frags here.
1809 	 */
1810 	size = numfrags(fs, size);
1811 	if (size != fs->fs_frag) {
1812 		bno = dtogd(fs, blkno);
1813 		for (i = size; i < fs->fs_frag; i++)
1814 			setbit(blksfree, bno + i);
1815 		i = fs->fs_frag - size;
1816 		cgp->cg_cs.cs_nffree += i;
1817 		fs->fs_cstotal.cs_nffree += i;
1818 		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1819 		fs->fs_fmod = 1;
1820 		cgp->cg_frsum[i]++;
1821 	}
1822 	/* XXX Fixme. */
1823 	UFS_UNLOCK(ump);
1824 	if (DOINGSOFTDEP(ITOV(ip)))
1825 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
1826 		    size, 0);
1827 	UFS_LOCK(ump);
1828 	return (blkno);
1829 }
1830 
1831 /*
1832  * Determine whether a cluster can be allocated.
1833  *
1834  * We do not currently check for optimal rotational layout if there
1835  * are multiple choices in the same cylinder group. Instead we just
1836  * take the first one that we find following bpref.
1837  */
1838 static ufs2_daddr_t
1839 ffs_clusteralloc(ip, cg, bpref, len)
1840 	struct inode *ip;
1841 	u_int cg;
1842 	ufs2_daddr_t bpref;
1843 	int len;
1844 {
1845 	struct fs *fs;
1846 	struct cg *cgp;
1847 	struct buf *bp;
1848 	struct ufsmount *ump;
1849 	int i, run, bit, map, got, error;
1850 	ufs2_daddr_t bno;
1851 	u_char *mapp;
1852 	int32_t *lp;
1853 	u_int8_t *blksfree;
1854 
1855 	ump = ITOUMP(ip);
1856 	fs = ump->um_fs;
1857 	if (fs->fs_maxcluster[cg] < len)
1858 		return (0);
1859 	UFS_UNLOCK(ump);
1860 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
1861 		UFS_LOCK(ump);
1862 		return (0);
1863 	}
1864 	/*
1865 	 * Check to see if a cluster of the needed size (or bigger) is
1866 	 * available in this cylinder group.
1867 	 */
1868 	lp = &cg_clustersum(cgp)[len];
1869 	for (i = len; i <= fs->fs_contigsumsize; i++)
1870 		if (*lp++ > 0)
1871 			break;
1872 	if (i > fs->fs_contigsumsize) {
1873 		/*
1874 		 * This is the first time looking for a cluster in this
1875 		 * cylinder group. Update the cluster summary information
1876 		 * to reflect the true maximum sized cluster so that
1877 		 * future cluster allocation requests can avoid reading
1878 		 * the cylinder group map only to find no clusters.
1879 		 */
1880 		lp = &cg_clustersum(cgp)[len - 1];
1881 		for (i = len - 1; i > 0; i--)
1882 			if (*lp-- > 0)
1883 				break;
1884 		UFS_LOCK(ump);
1885 		fs->fs_maxcluster[cg] = i;
1886 		brelse(bp);
1887 		return (0);
1888 	}
1889 	/*
1890 	 * Search the cluster map to find a big enough cluster.
1891 	 * We take the first one that we find, even if it is larger
1892 	 * than we need as we prefer to get one close to the previous
1893 	 * block allocation. We do not search before the current
1894 	 * preference point as we do not want to allocate a block
1895 	 * that is located before the previous one (as we will
1896 	 * then have to wait for another pass of the elevator
1897 	 * algorithm before it will be read). We prefer to fail and
1898 	 * be recalled to try an allocation in the next cylinder group.
1899 	 */
1900 	if (dtog(fs, bpref) != cg)
1901 		bpref = cgdata(fs, cg);
1902 	else
1903 		bpref = blknum(fs, bpref);
1904 	bpref = fragstoblks(fs, dtogd(fs, bpref));
1905 	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1906 	map = *mapp++;
1907 	bit = 1 << (bpref % NBBY);
1908 	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1909 		if ((map & bit) == 0) {
1910 			run = 0;
1911 		} else {
1912 			run++;
1913 			if (run == len)
1914 				break;
1915 		}
1916 		if ((got & (NBBY - 1)) != (NBBY - 1)) {
1917 			bit <<= 1;
1918 		} else {
1919 			map = *mapp++;
1920 			bit = 1;
1921 		}
1922 	}
1923 	if (got >= cgp->cg_nclusterblks) {
1924 		UFS_LOCK(ump);
1925 		brelse(bp);
1926 		return (0);
1927 	}
1928 	/*
1929 	 * Allocate the cluster that we have found.
1930 	 */
1931 	blksfree = cg_blksfree(cgp);
1932 	for (i = 1; i <= len; i++)
1933 		if (!ffs_isblock(fs, blksfree, got - run + i))
1934 			panic("ffs_clusteralloc: map mismatch");
1935 	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1936 	if (dtog(fs, bno) != cg)
1937 		panic("ffs_clusteralloc: allocated out of group");
1938 	len = blkstofrags(fs, len);
1939 	UFS_LOCK(ump);
1940 	for (i = 0; i < len; i += fs->fs_frag)
1941 		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1942 			panic("ffs_clusteralloc: lost block");
1943 	ACTIVECLEAR(fs, cg);
1944 	UFS_UNLOCK(ump);
1945 	bdwrite(bp);
1946 	return (bno);
1947 }
1948 
1949 static inline struct buf *
1950 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1951 {
1952 	struct fs *fs;
1953 
1954 	fs = ITOFS(ip);
1955 	return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
1956 	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
1957 	    gbflags));
1958 }
1959 
1960 /*
1961  * Determine whether an inode can be allocated.
1962  *
1963  * Check to see if an inode is available, and if it is,
1964  * allocate it using the following policy:
1965  *   1) allocate the requested inode.
1966  *   2) allocate the next available inode after the requested
1967  *      inode in the specified cylinder group.
1968  */
1969 static ufs2_daddr_t
1970 ffs_nodealloccg(ip, cg, ipref, mode, unused)
1971 	struct inode *ip;
1972 	u_int cg;
1973 	ufs2_daddr_t ipref;
1974 	int mode;
1975 	int unused;
1976 {
1977 	struct fs *fs;
1978 	struct cg *cgp;
1979 	struct buf *bp, *ibp;
1980 	struct ufsmount *ump;
1981 	u_int8_t *inosused, *loc;
1982 	struct ufs2_dinode *dp2;
1983 	int error, start, len, i;
1984 	u_int32_t old_initediblk;
1985 
1986 	ump = ITOUMP(ip);
1987 	fs = ump->um_fs;
1988 check_nifree:
1989 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
1990 		return (0);
1991 	UFS_UNLOCK(ump);
1992 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
1993 		UFS_LOCK(ump);
1994 		return (0);
1995 	}
1996 restart:
1997 	if (cgp->cg_cs.cs_nifree == 0) {
1998 		brelse(bp);
1999 		UFS_LOCK(ump);
2000 		return (0);
2001 	}
2002 	inosused = cg_inosused(cgp);
2003 	if (ipref) {
2004 		ipref %= fs->fs_ipg;
2005 		if (isclr(inosused, ipref))
2006 			goto gotit;
2007 	}
2008 	start = cgp->cg_irotor / NBBY;
2009 	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2010 	loc = memcchr(&inosused[start], 0xff, len);
2011 	if (loc == NULL) {
2012 		len = start + 1;
2013 		start = 0;
2014 		loc = memcchr(&inosused[start], 0xff, len);
2015 		if (loc == NULL) {
2016 			printf("cg = %d, irotor = %ld, fs = %s\n",
2017 			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2018 			panic("ffs_nodealloccg: map corrupted");
2019 			/* NOTREACHED */
2020 		}
2021 	}
2022 	ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2023 gotit:
2024 	/*
2025 	 * Check to see if we need to initialize more inodes.
2026 	 */
2027 	if (fs->fs_magic == FS_UFS2_MAGIC &&
2028 	    ipref + INOPB(fs) > cgp->cg_initediblk &&
2029 	    cgp->cg_initediblk < cgp->cg_niblk) {
2030 		old_initediblk = cgp->cg_initediblk;
2031 
2032 		/*
2033 		 * Free the cylinder group lock before writing the
2034 		 * initialized inode block.  Entering babarrierwrite()
2035 		 * while holding the cylinder group lock causes a lock
2036 		 * order violation between the cylinder group lock and
2037 		 * snaplk.
2038 		 *
2039 		 * Another thread can decide to initialize the same
2040 		 * inode block, but whichever thread first gets the
2041 		 * cylinder group lock after writing the newly
2042 		 * allocated inode block will update it and the other
2043 		 * will realize that it has lost and leave the
2044 		 * cylinder group unchanged.
2045 		 */
2046 		ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2047 		brelse(bp);
2048 		if (ibp == NULL) {
2049 			/*
2050 			 * The inode block buffer is already owned by
2051 			 * another thread, which must initialize it.
2052 			 * Wait on the buffer, with the cg buffer lock
2053 			 * dropped, to allow the other thread to finish
2054 			 * its updates, then retry.
2055 			 */
2056 			ibp = getinobuf(ip, cg, old_initediblk, 0);
2057 			brelse(ibp);
2058 			UFS_LOCK(ump);
2059 			goto check_nifree;
2060 		}
2061 		bzero(ibp->b_data, (int)fs->fs_bsize);
2062 		dp2 = (struct ufs2_dinode *)(ibp->b_data);
2063 		for (i = 0; i < INOPB(fs); i++) {
2064 			while (dp2->di_gen == 0)
2065 				dp2->di_gen = arc4random();
2066 			dp2++;
2067 		}
2068 		/*
2069 		 * Rather than adding a soft updates dependency to ensure
2070 		 * that the new inode block is written before it is claimed
2071 		 * by the cylinder group map, we just do a barrier write
2072 		 * here. The barrier write will ensure that the inode block
2073 		 * gets written before the updated cylinder group map can be
2074 		 * written. The barrier write should only slow down bulk
2075 		 * loading of newly created filesystems.
2076 		 */
2077 		babarrierwrite(ibp);
2078 
2079 		/*
2080 		 * After the inode block is written, try to update the
2081 		 * cg initediblk pointer.  If another thread beat us
2082 		 * to it, then leave it unchanged as the other thread
2083 		 * has already set it correctly.
2084 		 */
2085 		error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp);
2086 		UFS_LOCK(ump);
2087 		ACTIVECLEAR(fs, cg);
2088 		UFS_UNLOCK(ump);
2089 		if (error != 0)
2090 			return (error);
2091 		if (cgp->cg_initediblk == old_initediblk)
2092 			cgp->cg_initediblk += INOPB(fs);
2093 		goto restart;
2094 	}
2095 	cgp->cg_irotor = ipref;
2096 	UFS_LOCK(ump);
2097 	ACTIVECLEAR(fs, cg);
2098 	setbit(inosused, ipref);
2099 	cgp->cg_cs.cs_nifree--;
2100 	fs->fs_cstotal.cs_nifree--;
2101 	fs->fs_cs(fs, cg).cs_nifree--;
2102 	fs->fs_fmod = 1;
2103 	if ((mode & IFMT) == IFDIR) {
2104 		cgp->cg_cs.cs_ndir++;
2105 		fs->fs_cstotal.cs_ndir++;
2106 		fs->fs_cs(fs, cg).cs_ndir++;
2107 	}
2108 	UFS_UNLOCK(ump);
2109 	if (DOINGSOFTDEP(ITOV(ip)))
2110 		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2111 	bdwrite(bp);
2112 	return ((ino_t)(cg * fs->fs_ipg + ipref));
2113 }
2114 
2115 /*
2116  * Free a block or fragment.
2117  *
2118  * The specified block or fragment is placed back in the
2119  * free map. If a fragment is deallocated, a possible
2120  * block reassembly is checked.
2121  */
2122 static void
2123 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2124 	struct ufsmount *ump;
2125 	struct fs *fs;
2126 	struct vnode *devvp;
2127 	ufs2_daddr_t bno;
2128 	long size;
2129 	ino_t inum;
2130 	struct workhead *dephd;
2131 {
2132 	struct mount *mp;
2133 	struct cg *cgp;
2134 	struct buf *bp;
2135 	ufs1_daddr_t fragno, cgbno;
2136 	int i, blk, frags, bbase, error;
2137 	u_int cg;
2138 	u_int8_t *blksfree;
2139 	struct cdev *dev;
2140 
2141 	cg = dtog(fs, bno);
2142 	if (devvp->v_type == VREG) {
2143 		/* devvp is a snapshot */
2144 		MPASS(devvp->v_mount->mnt_data == ump);
2145 		dev = ump->um_devvp->v_rdev;
2146 	} else if (devvp->v_type == VCHR) {
2147 		/* devvp is a normal disk device */
2148 		dev = devvp->v_rdev;
2149 		ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2150 	} else
2151 		return;
2152 #ifdef INVARIANTS
2153 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2154 	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2155 		printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2156 		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2157 		    size, fs->fs_fsmnt);
2158 		panic("ffs_blkfree_cg: bad size");
2159 	}
2160 #endif
2161 	if ((u_int)bno >= fs->fs_size) {
2162 		printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2163 		    (u_long)inum);
2164 		ffs_fserr(fs, inum, "bad block");
2165 		return;
2166 	}
2167 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2168 		return;
2169 	cgbno = dtogd(fs, bno);
2170 	blksfree = cg_blksfree(cgp);
2171 	UFS_LOCK(ump);
2172 	if (size == fs->fs_bsize) {
2173 		fragno = fragstoblks(fs, cgbno);
2174 		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2175 			if (devvp->v_type == VREG) {
2176 				UFS_UNLOCK(ump);
2177 				/* devvp is a snapshot */
2178 				brelse(bp);
2179 				return;
2180 			}
2181 			printf("dev = %s, block = %jd, fs = %s\n",
2182 			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2183 			panic("ffs_blkfree_cg: freeing free block");
2184 		}
2185 		ffs_setblock(fs, blksfree, fragno);
2186 		ffs_clusteracct(fs, cgp, fragno, 1);
2187 		cgp->cg_cs.cs_nbfree++;
2188 		fs->fs_cstotal.cs_nbfree++;
2189 		fs->fs_cs(fs, cg).cs_nbfree++;
2190 	} else {
2191 		bbase = cgbno - fragnum(fs, cgbno);
2192 		/*
2193 		 * decrement the counts associated with the old frags
2194 		 */
2195 		blk = blkmap(fs, blksfree, bbase);
2196 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2197 		/*
2198 		 * deallocate the fragment
2199 		 */
2200 		frags = numfrags(fs, size);
2201 		for (i = 0; i < frags; i++) {
2202 			if (isset(blksfree, cgbno + i)) {
2203 				printf("dev = %s, block = %jd, fs = %s\n",
2204 				    devtoname(dev), (intmax_t)(bno + i),
2205 				    fs->fs_fsmnt);
2206 				panic("ffs_blkfree_cg: freeing free frag");
2207 			}
2208 			setbit(blksfree, cgbno + i);
2209 		}
2210 		cgp->cg_cs.cs_nffree += i;
2211 		fs->fs_cstotal.cs_nffree += i;
2212 		fs->fs_cs(fs, cg).cs_nffree += i;
2213 		/*
2214 		 * add back in counts associated with the new frags
2215 		 */
2216 		blk = blkmap(fs, blksfree, bbase);
2217 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2218 		/*
2219 		 * if a complete block has been reassembled, account for it
2220 		 */
2221 		fragno = fragstoblks(fs, bbase);
2222 		if (ffs_isblock(fs, blksfree, fragno)) {
2223 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
2224 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2225 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2226 			ffs_clusteracct(fs, cgp, fragno, 1);
2227 			cgp->cg_cs.cs_nbfree++;
2228 			fs->fs_cstotal.cs_nbfree++;
2229 			fs->fs_cs(fs, cg).cs_nbfree++;
2230 		}
2231 	}
2232 	fs->fs_fmod = 1;
2233 	ACTIVECLEAR(fs, cg);
2234 	UFS_UNLOCK(ump);
2235 	mp = UFSTOVFS(ump);
2236 	if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
2237 		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2238 		    numfrags(fs, size), dephd);
2239 	bdwrite(bp);
2240 }
2241 
2242 struct ffs_blkfree_trim_params {
2243 	struct task task;
2244 	struct ufsmount *ump;
2245 	struct vnode *devvp;
2246 	ufs2_daddr_t bno;
2247 	long size;
2248 	ino_t inum;
2249 	struct workhead *pdephd;
2250 	struct workhead dephd;
2251 };
2252 
2253 static void
2254 ffs_blkfree_trim_task(ctx, pending)
2255 	void *ctx;
2256 	int pending;
2257 {
2258 	struct ffs_blkfree_trim_params *tp;
2259 
2260 	tp = ctx;
2261 	ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2262 	    tp->inum, tp->pdephd);
2263 	vn_finished_secondary_write(UFSTOVFS(tp->ump));
2264 	atomic_add_int(&tp->ump->um_trim_inflight, -1);
2265 	free(tp, M_TEMP);
2266 }
2267 
2268 static void
2269 ffs_blkfree_trim_completed(bip)
2270 	struct bio *bip;
2271 {
2272 	struct ffs_blkfree_trim_params *tp;
2273 
2274 	tp = bip->bio_caller2;
2275 	g_destroy_bio(bip);
2276 	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2277 	taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
2278 }
2279 
2280 void
2281 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
2282 	struct ufsmount *ump;
2283 	struct fs *fs;
2284 	struct vnode *devvp;
2285 	ufs2_daddr_t bno;
2286 	long size;
2287 	ino_t inum;
2288 	enum vtype vtype;
2289 	struct workhead *dephd;
2290 {
2291 	struct mount *mp;
2292 	struct bio *bip;
2293 	struct ffs_blkfree_trim_params *tp;
2294 
2295 	/*
2296 	 * Check to see if a snapshot wants to claim the block.
2297 	 * Check that devvp is a normal disk device, not a snapshot,
2298 	 * that it has one or more snapshots associated with it, and
2299 	 * that one of those snapshots wants to claim the block.
2300 	 */
2301 	if (devvp->v_type == VCHR &&
2302 	    (devvp->v_vflag & VV_COPYONWRITE) &&
2303 	    ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2304 		return;
2305 	}
2306 	/*
2307 	 * Nothing to delay if TRIM is disabled, or the operation is
2308 	 * performed on the snapshot.
2309 	 */
2310 	if (!ump->um_candelete || devvp->v_type == VREG) {
2311 		ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2312 		return;
2313 	}
2314 
2315 	/*
2316 	 * Postpone setting the free bit in the cg bitmap until the
2317 	 * BIO_DELETE is completed.  Otherwise, due to disk queue
2318 	 * reordering, TRIM might be issued after we reuse the block
2319 	 * and write some new data into it.
2320 	 */
2321 	atomic_add_int(&ump->um_trim_inflight, 1);
2322 	tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
2323 	tp->ump = ump;
2324 	tp->devvp = devvp;
2325 	tp->bno = bno;
2326 	tp->size = size;
2327 	tp->inum = inum;
2328 	if (dephd != NULL) {
2329 		LIST_INIT(&tp->dephd);
2330 		LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2331 		tp->pdephd = &tp->dephd;
2332 	} else
2333 		tp->pdephd = NULL;
2334 
2335 	bip = g_alloc_bio();
2336 	bip->bio_cmd = BIO_DELETE;
2337 	bip->bio_offset = dbtob(fsbtodb(fs, bno));
2338 	bip->bio_done = ffs_blkfree_trim_completed;
2339 	bip->bio_length = size;
2340 	bip->bio_caller2 = tp;
2341 
2342 	mp = UFSTOVFS(ump);
2343 	vn_start_secondary_write(NULL, &mp, 0);
2344 	g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
2345 }
2346 
2347 #ifdef INVARIANTS
2348 /*
2349  * Verify allocation of a block or fragment. Returns true if block or
2350  * fragment is allocated, false if it is free.
2351  */
2352 static int
2353 ffs_checkblk(ip, bno, size)
2354 	struct inode *ip;
2355 	ufs2_daddr_t bno;
2356 	long size;
2357 {
2358 	struct fs *fs;
2359 	struct cg *cgp;
2360 	struct buf *bp;
2361 	ufs1_daddr_t cgbno;
2362 	int i, error, frags, free;
2363 	u_int8_t *blksfree;
2364 
2365 	fs = ITOFS(ip);
2366 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2367 		printf("bsize = %ld, size = %ld, fs = %s\n",
2368 		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
2369 		panic("ffs_checkblk: bad size");
2370 	}
2371 	if ((u_int)bno >= fs->fs_size)
2372 		panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2373 	error = ffs_getcg(fs, ITODEVVP(ip), dtog(fs, bno), &bp, &cgp);
2374 	if (error)
2375 		panic("ffs_checkblk: cylinder group read failed");
2376 	blksfree = cg_blksfree(cgp);
2377 	cgbno = dtogd(fs, bno);
2378 	if (size == fs->fs_bsize) {
2379 		free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2380 	} else {
2381 		frags = numfrags(fs, size);
2382 		for (free = 0, i = 0; i < frags; i++)
2383 			if (isset(blksfree, cgbno + i))
2384 				free++;
2385 		if (free != 0 && free != frags)
2386 			panic("ffs_checkblk: partially free fragment");
2387 	}
2388 	brelse(bp);
2389 	return (!free);
2390 }
2391 #endif /* INVARIANTS */
2392 
2393 /*
2394  * Free an inode.
2395  */
2396 int
2397 ffs_vfree(pvp, ino, mode)
2398 	struct vnode *pvp;
2399 	ino_t ino;
2400 	int mode;
2401 {
2402 	struct ufsmount *ump;
2403 	struct inode *ip;
2404 
2405 	if (DOINGSOFTDEP(pvp)) {
2406 		softdep_freefile(pvp, ino, mode);
2407 		return (0);
2408 	}
2409 	ip = VTOI(pvp);
2410 	ump = VFSTOUFS(pvp->v_mount);
2411 	return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
2412 }
2413 
2414 /*
2415  * Do the actual free operation.
2416  * The specified inode is placed back in the free map.
2417  */
2418 int
2419 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2420 	struct ufsmount *ump;
2421 	struct fs *fs;
2422 	struct vnode *devvp;
2423 	ino_t ino;
2424 	int mode;
2425 	struct workhead *wkhd;
2426 {
2427 	struct cg *cgp;
2428 	struct buf *bp;
2429 	ufs2_daddr_t cgbno;
2430 	int error;
2431 	u_int cg;
2432 	u_int8_t *inosused;
2433 	struct cdev *dev;
2434 
2435 	cg = ino_to_cg(fs, ino);
2436 	if (devvp->v_type == VREG) {
2437 		/* devvp is a snapshot */
2438 		MPASS(devvp->v_mount->mnt_data == ump);
2439 		dev = ump->um_devvp->v_rdev;
2440 		cgbno = fragstoblks(fs, cgtod(fs, cg));
2441 	} else if (devvp->v_type == VCHR) {
2442 		/* devvp is a normal disk device */
2443 		dev = devvp->v_rdev;
2444 		cgbno = fsbtodb(fs, cgtod(fs, cg));
2445 	} else {
2446 		bp = NULL;
2447 		return (0);
2448 	}
2449 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2450 		panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2451 		    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2452 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2453 		return (error);
2454 	inosused = cg_inosused(cgp);
2455 	ino %= fs->fs_ipg;
2456 	if (isclr(inosused, ino)) {
2457 		printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2458 		    (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
2459 		if (fs->fs_ronly == 0)
2460 			panic("ffs_freefile: freeing free inode");
2461 	}
2462 	clrbit(inosused, ino);
2463 	if (ino < cgp->cg_irotor)
2464 		cgp->cg_irotor = ino;
2465 	cgp->cg_cs.cs_nifree++;
2466 	UFS_LOCK(ump);
2467 	fs->fs_cstotal.cs_nifree++;
2468 	fs->fs_cs(fs, cg).cs_nifree++;
2469 	if ((mode & IFMT) == IFDIR) {
2470 		cgp->cg_cs.cs_ndir--;
2471 		fs->fs_cstotal.cs_ndir--;
2472 		fs->fs_cs(fs, cg).cs_ndir--;
2473 	}
2474 	fs->fs_fmod = 1;
2475 	ACTIVECLEAR(fs, cg);
2476 	UFS_UNLOCK(ump);
2477 	if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
2478 		softdep_setup_inofree(UFSTOVFS(ump), bp,
2479 		    ino + cg * fs->fs_ipg, wkhd);
2480 	bdwrite(bp);
2481 	return (0);
2482 }
2483 
2484 /*
2485  * Check to see if a file is free.
2486  * Used to check for allocated files in snapshots.
2487  */
2488 int
2489 ffs_checkfreefile(fs, devvp, ino)
2490 	struct fs *fs;
2491 	struct vnode *devvp;
2492 	ino_t ino;
2493 {
2494 	struct cg *cgp;
2495 	struct buf *bp;
2496 	ufs2_daddr_t cgbno;
2497 	int ret, error;
2498 	u_int cg;
2499 	u_int8_t *inosused;
2500 
2501 	cg = ino_to_cg(fs, ino);
2502 	if (devvp->v_type == VREG) {
2503 		/* devvp is a snapshot */
2504 		cgbno = fragstoblks(fs, cgtod(fs, cg));
2505 	} else if (devvp->v_type == VCHR) {
2506 		/* devvp is a normal disk device */
2507 		cgbno = fsbtodb(fs, cgtod(fs, cg));
2508 	} else {
2509 		return (1);
2510 	}
2511 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2512 		return (1);
2513 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2514 		return (1);
2515 	inosused = cg_inosused(cgp);
2516 	ino %= fs->fs_ipg;
2517 	ret = isclr(inosused, ino);
2518 	brelse(bp);
2519 	return (ret);
2520 }
2521 
2522 /*
2523  * Find a block of the specified size in the specified cylinder group.
2524  *
2525  * It is a panic if a request is made to find a block if none are
2526  * available.
2527  */
2528 static ufs1_daddr_t
2529 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2530 	struct fs *fs;
2531 	struct cg *cgp;
2532 	ufs2_daddr_t bpref;
2533 	int allocsiz;
2534 {
2535 	ufs1_daddr_t bno;
2536 	int start, len, loc, i;
2537 	int blk, field, subfield, pos;
2538 	u_int8_t *blksfree;
2539 
2540 	/*
2541 	 * find the fragment by searching through the free block
2542 	 * map for an appropriate bit pattern
2543 	 */
2544 	if (bpref)
2545 		start = dtogd(fs, bpref) / NBBY;
2546 	else
2547 		start = cgp->cg_frotor / NBBY;
2548 	blksfree = cg_blksfree(cgp);
2549 	len = howmany(fs->fs_fpg, NBBY) - start;
2550 	loc = scanc((u_int)len, (u_char *)&blksfree[start],
2551 		fragtbl[fs->fs_frag],
2552 		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2553 	if (loc == 0) {
2554 		len = start + 1;
2555 		start = 0;
2556 		loc = scanc((u_int)len, (u_char *)&blksfree[0],
2557 			fragtbl[fs->fs_frag],
2558 			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2559 		if (loc == 0) {
2560 			printf("start = %d, len = %d, fs = %s\n",
2561 			    start, len, fs->fs_fsmnt);
2562 			panic("ffs_alloccg: map corrupted");
2563 			/* NOTREACHED */
2564 		}
2565 	}
2566 	bno = (start + len - loc) * NBBY;
2567 	cgp->cg_frotor = bno;
2568 	/*
2569 	 * found the byte in the map
2570 	 * sift through the bits to find the selected frag
2571 	 */
2572 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2573 		blk = blkmap(fs, blksfree, bno);
2574 		blk <<= 1;
2575 		field = around[allocsiz];
2576 		subfield = inside[allocsiz];
2577 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2578 			if ((blk & field) == subfield)
2579 				return (bno + pos);
2580 			field <<= 1;
2581 			subfield <<= 1;
2582 		}
2583 	}
2584 	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2585 	panic("ffs_alloccg: block not in map");
2586 	return (-1);
2587 }
2588 
2589 static const struct statfs *
2590 ffs_getmntstat(struct vnode *devvp)
2591 {
2592 
2593 	if (devvp->v_type == VCHR)
2594 		return (&devvp->v_rdev->si_mountpt->mnt_stat);
2595 	return (ffs_getmntstat(VFSTOUFS(devvp->v_mount)->um_devvp));
2596 }
2597 
2598 /*
2599  * Fetch and verify a cylinder group.
2600  */
2601 int
2602 ffs_getcg(fs, devvp, cg, bpp, cgpp)
2603 	struct fs *fs;
2604 	struct vnode *devvp;
2605 	u_int cg;
2606 	struct buf **bpp;
2607 	struct cg **cgpp;
2608 {
2609 	struct buf *bp;
2610 	struct cg *cgp;
2611 	const struct statfs *sfs;
2612 	int flags, error;
2613 
2614 	*bpp = NULL;
2615 	*cgpp = NULL;
2616 	flags = 0;
2617 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2618 		flags |= GB_CKHASH;
2619 	error = breadn_flags(devvp, devvp->v_type == VREG ?
2620 	    fragstoblks(fs, cgtod(fs, cg)) : fsbtodb(fs, cgtod(fs, cg)),
2621 	    (int)fs->fs_cgsize, NULL, NULL, 0, NOCRED, flags,
2622 	    ffs_ckhash_cg, &bp);
2623 	if (error != 0)
2624 		return (error);
2625 	cgp = (struct cg *)bp->b_data;
2626 	if (((fs->fs_metackhash & CK_CYLGRP) != 0 &&
2627 	    (bp->b_flags & B_CKHASH) != 0 &&
2628 	    cgp->cg_ckhash != bp->b_ckhash) ||
2629 	    !cg_chkmagic(cgp) || cgp->cg_cgx != cg) {
2630 		sfs = ffs_getmntstat(devvp);
2631 		printf("UFS %s%s (%s) cylinder checksum failed: cg %u, cgp: "
2632 		    "0x%x != bp: 0x%jx\n",
2633 		    devvp->v_type == VCHR ? "" : "snapshot of ",
2634 		    sfs->f_mntfromname, sfs->f_mntonname,
2635 		    cg, cgp->cg_ckhash, (uintmax_t)bp->b_ckhash);
2636 		bp->b_flags &= ~B_CKHASH;
2637 		bp->b_flags |= B_INVAL | B_NOCACHE;
2638 		brelse(bp);
2639 		return (EIO);
2640 	}
2641 	bp->b_flags &= ~B_CKHASH;
2642 	bp->b_xflags |= BX_BKGRDWRITE;
2643 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2644 		bp->b_xflags |= BX_CYLGRP;
2645 	cgp->cg_old_time = cgp->cg_time = time_second;
2646 	*bpp = bp;
2647 	*cgpp = cgp;
2648 	return (0);
2649 }
2650 
2651 static void
2652 ffs_ckhash_cg(bp)
2653 	struct buf *bp;
2654 {
2655 	uint32_t ckhash;
2656 	struct cg *cgp;
2657 
2658 	cgp = (struct cg *)bp->b_data;
2659 	ckhash = cgp->cg_ckhash;
2660 	cgp->cg_ckhash = 0;
2661 	bp->b_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2662 	cgp->cg_ckhash = ckhash;
2663 }
2664 
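/*
 * Illustrative sketch (not part of the kernel source): the self-check
 * pattern used by ffs_getcg()/ffs_ckhash_cg(), where the stored check
 * hash is zeroed before the record is hashed so that the hash covers
 * every byte of the record, including the field that holds it.  A toy
 * FNV-1a hash stands in for the kernel's calculate_crc32c().
 */
#include <stdint.h>
#include <string.h>

struct record {
	uint32_t ckhash;	/* stored check hash, hashed as zero */
	char payload[60];
};

static uint32_t
toy_hash(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint32_t h = 2166136261u;

	while (len-- > 0) {
		h ^= *p++;
		h *= 16777619u;
	}
	return (h);
}

static uint32_t
record_hash(struct record *rp)
{
	uint32_t saved, h;

	saved = rp->ckhash;
	rp->ckhash = 0;		/* hash the record with the field zeroed */
	h = toy_hash(rp, sizeof(*rp));
	rp->ckhash = saved;	/* restore the stored value */
	return (h);
}

int
main(void)
{
	struct record r;

	memset(&r, 0, sizeof(r));
	strcpy(r.payload, "cylinder group image");
	r.ckhash = record_hash(&r);			/* as done when writing */
	return (record_hash(&r) == r.ckhash ? 0 : 1);	/* as done when reading */
}
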
2665 /*
2666  * Fserr prints the name of a filesystem with an error diagnostic.
2667  *
2668  * The form of the error message is:
2669  *	fs: error message
2670  */
2671 void
2672 ffs_fserr(fs, inum, cp)
2673 	struct fs *fs;
2674 	ino_t inum;
2675 	char *cp;
2676 {
2677 	struct thread *td = curthread;	/* XXX */
2678 	struct proc *p = td->td_proc;
2679 
2680 	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
2681 	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
2682 	    fs->fs_fsmnt, cp);
2683 }
2684 
2685 /*
2686  * This function provides the capability for the fsck program to
2687  * update an active filesystem. Fourteen operations are provided:
2688  *
2689  * adjrefcnt(inode, amt) - adjusts the reference count on the
2690  *	specified inode by the specified amount. Under normal
2691  *	operation the count should always go down. Decrementing
2692  *	the count to zero will cause the inode to be freed.
2693  * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2694  *	inode by the specified amount.
2695  * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
2696  *	adjust the superblock summary.
2697  * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2698  *	are marked as free. Inodes should never have to be marked
2699  *	as in use.
2700  * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2701  *	are marked as free. Inodes should never have to be marked
2702  *	as in use.
2703  * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2704  *	are marked as free. Blocks should never have to be marked
2705  *	as in use.
2706  * setflags(flags, set/clear) - the fs_flags field has the specified
2707  *	flags set (second parameter +1) or cleared (second parameter -1).
2708  * setcwd(dirinode) - set the current directory to dirinode in the
2709  *	filesystem associated with the snapshot.
2710  * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2711  *	in the current directory is oldvalue then change it to newvalue.
2712  * unlink(nameptr, oldvalue) - Verify that the inode number associated
2713  *	with nameptr in the current directory is oldvalue then unlink it.
2714  *
2715  * The following functions may only be used on a quiescent filesystem
2716  * by the soft updates journal. They are not safe to be run on an active
2717  * filesystem.
2718  *
2719  * setinode(inode, dip) - the specified disk inode is replaced with the
2720  *	contents pointed to by dip.
2721  * setbufoutput(fd, flags) - output associated with the specified file
2722  *	descriptor (which must reference the character device supporting
2723  *	the filesystem) switches from using physio to running through the
2724  *	buffer cache when flags is set to 1. The descriptor reverts to
2725  *	physio for output when flags is set to zero.
2726  */
2727 
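/*
 * Illustrative sketch (not part of the kernel source): how a userland
 * tool such as fsck_ffs(8) drives one of the sysctl nodes below.  The
 * struct fsck_cmd fields used (version, handle, value, size) are the
 * ones consumed by sysctl_ffs_fsck(); the mount point path and the
 * inode/adjustment values are hypothetical examples, and the request
 * only succeeds under the conditions that sysctl_ffs_fsck() enforces.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <ufs/ffs/fs.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct fsck_cmd cmd = { 0 };
	int fd;

	/* Any file or directory on the target filesystem identifies it. */
	if ((fd = open("/mnt", O_RDONLY)) == -1)
		err(1, "open");
	cmd.version = FFS_CMD_VERSION;
	cmd.handle = fd;
	cmd.value = 2;		/* inode number to adjust */
	cmd.size = -1;		/* decrement its link count by one */
	if (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL,
	    &cmd, sizeof(cmd)) == -1)
		warn("vfs.ffs.adjrefcnt");
	close(fd);
	return (0);
}
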
2728 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2729 
2730 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2731 	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2732 
2733 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2734 	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2735 
2736 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2737 	sysctl_ffs_fsck, "Adjust number of directories");
2738 
2739 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2740 	sysctl_ffs_fsck, "Adjust number of free blocks");
2741 
2742 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2743 	sysctl_ffs_fsck, "Adjust number of free inodes");
2744 
2745 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2746 	sysctl_ffs_fsck, "Adjust number of free frags");
2747 
2748 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2749 	sysctl_ffs_fsck, "Adjust number of free clusters");
2750 
2751 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2752 	sysctl_ffs_fsck, "Free Range of Directory Inodes");
2753 
2754 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2755 	sysctl_ffs_fsck, "Free Range of File Inodes");
2756 
2757 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2758 	sysctl_ffs_fsck, "Free Range of Blocks");
2759 
2760 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2761 	sysctl_ffs_fsck, "Change Filesystem Flags");
2762 
2763 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2764 	sysctl_ffs_fsck, "Set Current Working Directory");
2765 
2766 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2767 	sysctl_ffs_fsck, "Change Value of .. Entry");
2768 
2769 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2770 	sysctl_ffs_fsck, "Unlink a Duplicate Name");
2771 
2772 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2773 	sysctl_ffs_fsck, "Update an On-Disk Inode");
2774 
2775 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2776 	sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2777 
2778 #define DEBUG 1
2779 #ifdef DEBUG
2780 static int fsckcmds = 0;
2781 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2782 #endif /* DEBUG */
2783 
2784 static int buffered_write(struct file *, struct uio *, struct ucred *,
2785 	int, struct thread *);
2786 
2787 static int
2788 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2789 {
2790 	struct thread *td = curthread;
2791 	struct fsck_cmd cmd;
2792 	struct ufsmount *ump;
2793 	struct vnode *vp, *dvp, *fdvp;
2794 	struct inode *ip, *dp;
2795 	struct mount *mp;
2796 	struct fs *fs;
2797 	ufs2_daddr_t blkno;
2798 	long blkcnt, blksize;
2799 	struct file *fp, *vfp;
2800 	cap_rights_t rights;
2801 	int filetype, error;
2802 	static struct fileops *origops, bufferedops;
2803 
2804 	if (req->newlen > sizeof cmd)
2805 		return (EBADRPC);
2806 	if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2807 		return (error);
2808 	if (cmd.version != FFS_CMD_VERSION)
2809 		return (ERPCMISMATCH);
2810 	if ((error = getvnode(td, cmd.handle,
2811 	    cap_rights_init(&rights, CAP_FSCK), &fp)) != 0)
2812 		return (error);
2813 	vp = fp->f_data;
2814 	if (vp->v_type != VREG && vp->v_type != VDIR) {
2815 		fdrop(fp, td);
2816 		return (EINVAL);
2817 	}
2818 	vn_start_write(vp, &mp, V_WAIT);
2819 	if (mp == NULL ||
2820 	    strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2821 		vn_finished_write(mp);
2822 		fdrop(fp, td);
2823 		return (EINVAL);
2824 	}
2825 	ump = VFSTOUFS(mp);
2826 	if ((mp->mnt_flag & MNT_RDONLY) &&
2827 	    ump->um_fsckpid != td->td_proc->p_pid) {
2828 		vn_finished_write(mp);
2829 		fdrop(fp, td);
2830 		return (EROFS);
2831 	}
2832 	fs = ump->um_fs;
2833 	filetype = IFREG;
2834 
2835 	switch (oidp->oid_number) {
2836 
2837 	case FFS_SET_FLAGS:
2838 #ifdef DEBUG
2839 		if (fsckcmds)
2840 			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2841 			    cmd.size > 0 ? "set" : "clear");
2842 #endif /* DEBUG */
2843 		if (cmd.size > 0)
2844 			fs->fs_flags |= (long)cmd.value;
2845 		else
2846 			fs->fs_flags &= ~(long)cmd.value;
2847 		break;
2848 
2849 	case FFS_ADJ_REFCNT:
2850 #ifdef DEBUG
2851 		if (fsckcmds) {
2852 			printf("%s: adjust inode %jd link count by %jd\n",
2853 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2854 			    (intmax_t)cmd.size);
2855 		}
2856 #endif /* DEBUG */
2857 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2858 			break;
2859 		ip = VTOI(vp);
2860 		ip->i_nlink += cmd.size;
2861 		DIP_SET(ip, i_nlink, ip->i_nlink);
2862 		ip->i_effnlink += cmd.size;
2863 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2864 		error = ffs_update(vp, 1);
2865 		if (DOINGSOFTDEP(vp))
2866 			softdep_change_linkcnt(ip);
2867 		vput(vp);
2868 		break;
2869 
2870 	case FFS_ADJ_BLKCNT:
2871 #ifdef DEBUG
2872 		if (fsckcmds) {
2873 			printf("%s: adjust inode %jd block count by %jd\n",
2874 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2875 			    (intmax_t)cmd.size);
2876 		}
2877 #endif /* DEBUG */
2878 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2879 			break;
2880 		ip = VTOI(vp);
2881 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2882 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2883 		error = ffs_update(vp, 1);
2884 		vput(vp);
2885 		break;
2886 
2887 	case FFS_DIR_FREE:
2888 		filetype = IFDIR;
2889 		/* fall through */
2890 
2891 	case FFS_FILE_FREE:
2892 #ifdef DEBUG
2893 		if (fsckcmds) {
2894 			if (cmd.size == 1)
2895 				printf("%s: free %s inode %ju\n",
2896 				    mp->mnt_stat.f_mntonname,
2897 				    filetype == IFDIR ? "directory" : "file",
2898 				    (uintmax_t)cmd.value);
2899 			else
2900 				printf("%s: free %s inodes %ju-%ju\n",
2901 				    mp->mnt_stat.f_mntonname,
2902 				    filetype == IFDIR ? "directory" : "file",
2903 				    (uintmax_t)cmd.value,
2904 				    (uintmax_t)(cmd.value + cmd.size - 1));
2905 		}
2906 #endif /* DEBUG */
2907 		while (cmd.size > 0) {
2908 			if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2909 			    cmd.value, filetype, NULL)))
2910 				break;
2911 			cmd.size -= 1;
2912 			cmd.value += 1;
2913 		}
2914 		break;
2915 
2916 	case FFS_BLK_FREE:
2917 #ifdef DEBUG
2918 		if (fsckcmds) {
2919 			if (cmd.size == 1)
2920 				printf("%s: free block %jd\n",
2921 				    mp->mnt_stat.f_mntonname,
2922 				    (intmax_t)cmd.value);
2923 			else
2924 				printf("%s: free blocks %jd-%jd\n",
2925 				    mp->mnt_stat.f_mntonname,
2926 				    (intmax_t)cmd.value,
2927 				    (intmax_t)cmd.value + cmd.size - 1);
2928 		}
2929 #endif /* DEBUG */
2930 		blkno = cmd.value;
2931 		blkcnt = cmd.size;
2932 		blksize = fs->fs_frag - (blkno % fs->fs_frag);
2933 		while (blkcnt > 0) {
2934 			if (blksize > blkcnt)
2935 				blksize = blkcnt;
2936 			ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2937 			    blksize * fs->fs_fsize, UFS_ROOTINO, VDIR, NULL);
2938 			blkno += blksize;
2939 			blkcnt -= blksize;
2940 			blksize = fs->fs_frag;
2941 		}
2942 		break;
2943 
2944 	/*
2945 	 * Adjust superblock summaries.  fsck(8) is expected to
2946 	 * submit deltas when necessary.
2947 	 */
2948 	case FFS_ADJ_NDIR:
2949 #ifdef DEBUG
2950 		if (fsckcmds) {
2951 			printf("%s: adjust number of directories by %jd\n",
2952 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2953 		}
2954 #endif /* DEBUG */
2955 		fs->fs_cstotal.cs_ndir += cmd.value;
2956 		break;
2957 
2958 	case FFS_ADJ_NBFREE:
2959 #ifdef DEBUG
2960 		if (fsckcmds) {
2961 			printf("%s: adjust number of free blocks by %+jd\n",
2962 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2963 		}
2964 #endif /* DEBUG */
2965 		fs->fs_cstotal.cs_nbfree += cmd.value;
2966 		break;
2967 
2968 	case FFS_ADJ_NIFREE:
2969 #ifdef DEBUG
2970 		if (fsckcmds) {
2971 			printf("%s: adjust number of free inodes by %+jd\n",
2972 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2973 		}
2974 #endif /* DEBUG */
2975 		fs->fs_cstotal.cs_nifree += cmd.value;
2976 		break;
2977 
2978 	case FFS_ADJ_NFFREE:
2979 #ifdef DEBUG
2980 		if (fsckcmds) {
2981 			printf("%s: adjust number of free frags by %+jd\n",
2982 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2983 		}
2984 #endif /* DEBUG */
2985 		fs->fs_cstotal.cs_nffree += cmd.value;
2986 		break;
2987 
2988 	case FFS_ADJ_NUMCLUSTERS:
2989 #ifdef DEBUG
2990 		if (fsckcmds) {
2991 			printf("%s: adjust number of free clusters by %+jd\n",
2992 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2993 		}
2994 #endif /* DEBUG */
2995 		fs->fs_cstotal.cs_numclusters += cmd.value;
2996 		break;
2997 
2998 	case FFS_SET_CWD:
2999 #ifdef DEBUG
3000 		if (fsckcmds) {
3001 			printf("%s: set current directory to inode %jd\n",
3002 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3003 		}
3004 #endif /* DEBUG */
3005 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
3006 			break;
3007 		AUDIT_ARG_VNODE1(vp);
3008 		if ((error = change_dir(vp, td)) != 0) {
3009 			vput(vp);
3010 			break;
3011 		}
3012 		VOP_UNLOCK(vp, 0);
3013 		pwd_chdir(td, vp);
3014 		break;
3015 
3016 	case FFS_SET_DOTDOT:
3017 #ifdef DEBUG
3018 		if (fsckcmds) {
3019 			printf("%s: change .. in cwd from %jd to %jd\n",
3020 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3021 			    (intmax_t)cmd.size);
3022 		}
3023 #endif /* DEBUG */
3024 		/*
3025 		 * First we have to get and lock the parent directory
3026 		 * to which ".." points.
3027 		 */
3028 		error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
3029 		if (error)
3030 			break;
3031 		/*
3032 		 * Now we get and lock the child directory containing "..".
3033 		 */
3034 		FILEDESC_SLOCK(td->td_proc->p_fd);
3035 		dvp = td->td_proc->p_fd->fd_cdir;
3036 		FILEDESC_SUNLOCK(td->td_proc->p_fd);
3037 		if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
3038 			vput(fdvp);
3039 			break;
3040 		}
3041 		dp = VTOI(dvp);
3042 		dp->i_offset = 12;	/* XXX mastertemplate.dot_reclen */
3043 		error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3044 		    DT_DIR, 0);
3045 		cache_purge(fdvp);
3046 		cache_purge(dvp);
3047 		vput(dvp);
3048 		vput(fdvp);
3049 		break;
3050 
3051 	case FFS_UNLINK:
3052 #ifdef DEBUG
3053 		if (fsckcmds) {
3054 			char buf[32];
3055 
3056 			if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL))
3057 				strncpy(buf, "Name_too_long", 32);
3058 			printf("%s: unlink %s (inode %jd)\n",
3059 			    mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3060 		}
3061 #endif /* DEBUG */
3062 		/*
3063 		 * kern_unlinkat will do its own start/finish writes and
3064 		 * they do not nest, so drop ours here. Setting mp == NULL
3065 		 * indicates that vn_finished_write is not needed down below.
3066 		 */
3067 		vn_finished_write(mp);
3068 		mp = NULL;
3069 		error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
3070 		    UIO_USERSPACE, (ino_t)cmd.size);
3071 		break;
3072 
3073 	case FFS_SET_INODE:
3074 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3075 			error = EPERM;
3076 			break;
3077 		}
3078 #ifdef DEBUG
3079 		if (fsckcmds) {
3080 			printf("%s: update inode %jd\n",
3081 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3082 		}
3083 #endif /* DEBUG */
3084 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3085 			break;
3086 		AUDIT_ARG_VNODE1(vp);
3087 		ip = VTOI(vp);
3088 		if (I_IS_UFS1(ip))
3089 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3090 			    sizeof(struct ufs1_dinode));
3091 		else
3092 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3093 			    sizeof(struct ufs2_dinode));
3094 		if (error) {
3095 			vput(vp);
3096 			break;
3097 		}
3098 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3099 		error = ffs_update(vp, 1);
3100 		vput(vp);
3101 		break;
3102 
3103 	case FFS_SET_BUFOUTPUT:
3104 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3105 			error = EPERM;
3106 			break;
3107 		}
3108 		if (ITOUMP(VTOI(vp)) != ump) {
3109 			error = EINVAL;
3110 			break;
3111 		}
3112 #ifdef DEBUG
3113 		if (fsckcmds) {
3114 			printf("%s: %s buffered output for descriptor %jd\n",
3115 			    mp->mnt_stat.f_mntonname,
3116 			    cmd.size == 1 ? "enable" : "disable",
3117 			    (intmax_t)cmd.value);
3118 		}
3119 #endif /* DEBUG */
3120 		if ((error = getvnode(td, cmd.value,
3121 		    cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0)
3122 			break;
3123 		if (vfp->f_vnode->v_type != VCHR) {
3124 			fdrop(vfp, td);
3125 			error = EINVAL;
3126 			break;
3127 		}
3128 		if (origops == NULL) {
3129 			origops = vfp->f_ops;
3130 			bcopy((void *)origops, (void *)&bufferedops,
3131 			    sizeof(bufferedops));
3132 			bufferedops.fo_write = buffered_write;
3133 		}
3134 		if (cmd.size == 1)
3135 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3136 			    (uintptr_t)&bufferedops);
3137 		else
3138 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3139 			    (uintptr_t)origops);
3140 		fdrop(vfp, td);
3141 		break;
3142 
3143 	default:
3144 #ifdef DEBUG
3145 		if (fsckcmds) {
3146 			printf("Invalid request %d from fsck\n",
3147 			    oidp->oid_number);
3148 		}
3149 #endif /* DEBUG */
3150 		error = EINVAL;
3151 		break;
3152 
3153 	}
3154 	fdrop(fp, td);
3155 	vn_finished_write(mp);
3156 	return (error);
3157 }
3158 
3159 /*
3160  * Function to switch a descriptor to use the buffer cache to stage
3161  * its I/O. This is needed so that writes to the filesystem device
3162  * will give snapshots a chance to copy modified blocks for which it
3163  * needs to retain copies.
3164  */
3165 static int
3166 buffered_write(fp, uio, active_cred, flags, td)
3167 	struct file *fp;
3168 	struct uio *uio;
3169 	struct ucred *active_cred;
3170 	int flags;
3171 	struct thread *td;
3172 {
3173 	struct vnode *devvp, *vp;
3174 	struct inode *ip;
3175 	struct buf *bp;
3176 	struct fs *fs;
3177 	struct filedesc *fdp;
3178 	int error;
3179 	daddr_t lbn;
3180 
3181 	/*
3182 	 * The devvp is associated with the /dev filesystem. To discover
3183 	 * the filesystem with which the device is associated, we depend
3184 	 * on the application setting the current directory to a location
3185 	 * within the filesystem being written. Yes, this is an ugly hack.
3186 	 */
3187 	devvp = fp->f_vnode;
3188 	if (!vn_isdisk(devvp, NULL))
3189 		return (EINVAL);
3190 	fdp = td->td_proc->p_fd;
3191 	FILEDESC_SLOCK(fdp);
3192 	vp = fdp->fd_cdir;
3193 	vref(vp);
3194 	FILEDESC_SUNLOCK(fdp);
3195 	vn_lock(vp, LK_SHARED | LK_RETRY);
3196 	/*
3197 	 * Check that the current directory vnode indeed belongs to
3198 	 * UFS before trying to dereference UFS-specific v_data fields.
3199 	 */
3200 	if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3201 		vput(vp);
3202 		return (EINVAL);
3203 	}
3204 	ip = VTOI(vp);
3205 	if (ITODEVVP(ip) != devvp) {
3206 		vput(vp);
3207 		return (EINVAL);
3208 	}
3209 	fs = ITOFS(ip);
3210 	vput(vp);
3211 	foffset_lock_uio(fp, uio, flags);
3212 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3213 #ifdef DEBUG
3214 	if (fsckcmds) {
3215 		printf("%s: buffered write for block %jd\n",
3216 		    fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3217 	}
3218 #endif /* DEBUG */
3219 	/*
3220 	 * All I/O must be contained within a filesystem block, start on
3221 	 * a fragment boundary, and be a multiple of fragments in length.
3222 	 */
3223 	if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3224 	    fragoff(fs, uio->uio_offset) != 0 ||
3225 	    fragoff(fs, uio->uio_resid) != 0) {
3226 		error = EINVAL;
3227 		goto out;
3228 	}
3229 	lbn = numfrags(fs, uio->uio_offset);
3230 	bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3231 	bp->b_flags |= B_RELBUF;
3232 	if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3233 		brelse(bp);
3234 		goto out;
3235 	}
3236 	error = bwrite(bp);
3237 out:
3238 	VOP_UNLOCK(devvp, 0);
3239 	foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
3240 	return (error);
3241 }
3242