/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				  int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t	ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
static void	ffs_ckhash_cg(struct buf *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
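/*
 * Illustrative note (hypothetical geometry, not from this filesystem):
 * with fs_fsize = 4096 and fs_bsize = 32768, valid values of "size"
 * are 4096, 8192, ..., 32768; a request for 12288 bytes allocates
 * three fragments.
 */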
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error, gbflags;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	vp = ITOV(ip);
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	bp = NULL;
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= UFS_NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	*bpp = NULL;
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * No space is available.
	 */
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * to the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success of the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather the previous block
 * allocation will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
"do not force synchronous writes when blocks are reallocated");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
"enable block reallocation");

static int maxclustersearch = 10;
SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
0, "max number of cylinder groups to search for contiguous blocks");
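
/*
 * These knobs are exposed under the vfs.ffs sysctl node declared above;
 * for example, block reallocation can be disabled at runtime with
 * "sysctl vfs.ffs.doreallocblks=0".
 */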

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct ufsmount *ump;

	/*
	 * If the underlying device can do deletes, then skip reallocating
	 * the blocks of this file into contiguous sequences. Devices that
	 * benefit from BIO_DELETE also benefit from not moving the data.
	 * These devices are flash and therefore work less well with this
	 * optimization. Also skip if reallocblks has been disabled globally.
	 */
	ump = ap->a_vp->v_mount->mnt_data;
	if (ump->um_candelete || doreallocblks == 0)
		return (ENOSPC);

	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here.  Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
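			/*
			 * Switch to the second block map and bias soff so
			 * that soff + i indexes ebap starting at zero.
			 */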
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ump->um_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	ump = ITOUMP(ip);
	fs = ump->um_fs;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, do not move anything in it. Indirect blocks are
	 * usually initially laid out in a position between the data
	 * blocks. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
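			/*
			 * Switch to the second block map and bias soff so
			 * that soff + i indexes ebap starting at zero.
			 */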
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ump->um_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	ump = ITOUMP(pip);
	fs = ump->um_fs;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of directories created one after another
	 * in the same cg without intervening file creations.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
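	/*
	 * The fs_contigdirs[] counter updated here is consumed by
	 * ffs_dirpref() below, which caps runs of directories in one
	 * cg at maxcontigdirs before spilling into another group.
	 */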
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
					(allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %ju, fs = %s\n",
		    ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
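	/*
	 * The generation must never be zero: NFS file handles rely on
	 * i_gen to detect stale references to a reused inode.
	 */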
	while (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random();
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ufs_prepare_reclaim(*vpp);
	ip->i_flag = 0;
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		(*vpp)->v_op = &ffs_vnodeops2;
		ip->i_flag |= IN_UFS2;
	} else {
		(*vpp)->v_op = &ffs_vnodeops1;
	}
	return (0);
noinodes:
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
	fs = ITOFS(pip);

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Compute the various limits used for the
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
				    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;
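	/*
	 * Worked example (hypothetical values): with fs_avgfilesize =
	 * 16384 and fs_avgfpdir = 64, dirsize is 1MB of expected data
	 * per directory; with avgbfree = 1024 free 32KB blocks per cg,
	 * maxcontigdirs = min(1024 * 32768 / 1048576, 255) = 32
	 * directories before we spill into the next cylinder group.
	 */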

	/*
	 * Limit the number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 *
	 * We are trying to find a suitable cylinder group nearby
	 * our preferred cylinder group to place a new directory.
	 * We scan from our preferred cylinder group forward looking
	 * for a cylinder group that meets our criterion. If we get
	 * to the final cylinder group and do not find anything,
	 * we start scanning forwards from the beginning of the
	 * filesystem. While it might seem sensible to start scanning
	 * backwards or even to alternate looking forward and backward,
	 * this approach fails badly when the filesystem is nearly full.
	 * Specifically, we first search all the areas that have no space
	 * and finally try the one preceding that. We repeat this on
	 * every request and in the case of the final block end up
	 * searching the entire filesystem. By jumping to the front
	 * of the filesystem, our future forward searches always look
	 * in new cylinder groups, so we find every possible block after
	 * one pass over the filesystem.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop for when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks and the next fs_maxbpg blocks. Each additional section
 * contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. The first indirect is allocated immediately following the last
 * direct block and the data blocks for the first indirect immediately
 * follow it.
 *
 * If no blocks have been allocated in any other section, the indirect
 * block(s) are allocated in the same cylinder group as its inode in an
 * area reserved immediately following the inode blocks. The policy for
 * the data blocks is to place them in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block or the previous block is a hole, then the information on
 * the previous allocation is unavailable; here a best guess is made based
 * on the logical block number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * allocate blocks contiguously within the section if possible.
 */
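/*
 * Example (hypothetical values): with fs_maxbpg = 4096 and 32KB blocks,
 * the first section covers the 12 direct blocks plus the next 4096
 * blocks; each subsequent 4096-block section may be steered to a
 * different cylinder group by the rotor sweep implemented below.
 */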
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
	fs = ITOFS(ip);
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
		    ip->i_din1->di_db[UFS_NDADDR - 1] != 0)
			pref = ip->i_din1->di_db[UFS_NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == UFS_NDADDR) {
		pref = ip->i_din1->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < UFS_NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder group with a greater than average number
		 * of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Same as above, but for UFS2.
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
	fs = ITOFS(ip);
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[UFS_NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[UFS_NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == UFS_NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < UFS_NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder group with a greater than average number
		 * of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held.  Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ITOFS(ip);
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
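
/*
 * Example (hypothetical): with fs_ncg = 16 and a preferred cg of 5,
 * the quadratic rehash probes cgs 6, 8, 12, and 4; the brute force
 * pass then sweeps cgs 7 through 4 (wrapping at 16), so every group
 * is eventually examined.
 */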
1571 
1572 /*
1573  * Determine whether a fragment can be extended.
1574  *
1575  * Check to see if the necessary fragments are available, and
1576  * if they are, allocate them.
1577  */
1578 static ufs2_daddr_t
1579 ffs_fragextend(ip, cg, bprev, osize, nsize)
1580 	struct inode *ip;
1581 	u_int cg;
1582 	ufs2_daddr_t bprev;
1583 	int osize, nsize;
1584 {
1585 	struct fs *fs;
1586 	struct cg *cgp;
1587 	struct buf *bp;
1588 	struct ufsmount *ump;
1589 	int nffree;
1590 	long bno;
1591 	int frags, bbase;
1592 	int i, error;
1593 	u_int8_t *blksfree;
1594 
1595 	ump = ITOUMP(ip);
1596 	fs = ump->um_fs;
1597 	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1598 		return (0);
1599 	frags = numfrags(fs, nsize);
1600 	bbase = fragnum(fs, bprev);
1601 	if (bbase > fragnum(fs, (bprev + frags - 1))) {
1602 		/* cannot extend across a block boundary */
1603 		return (0);
1604 	}
1605 	UFS_UNLOCK(ump);
1606 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0)
1607 		goto fail;
1608 	bno = dtogd(fs, bprev);
1609 	blksfree = cg_blksfree(cgp);
1610 	for (i = numfrags(fs, osize); i < frags; i++)
1611 		if (isclr(blksfree, bno + i))
1612 			goto fail;
1613 	/*
1614 	 * The current fragment can be extended:
1615 	 * deduct the count on the fragment run being extended into,
1616 	 * increase the count on the remaining run (if any), and
1617 	 * allocate the extended piece.
1618 	 */
1619 	for (i = frags; i < fs->fs_frag - bbase; i++)
1620 		if (isclr(blksfree, bno + i))
1621 			break;
1622 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
1623 	if (i != frags)
1624 		cgp->cg_frsum[i - frags]++;
1625 	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1626 		clrbit(blksfree, bno + i);
1627 		cgp->cg_cs.cs_nffree--;
1628 		nffree++;
1629 	}
1630 	UFS_LOCK(ump);
1631 	fs->fs_cstotal.cs_nffree -= nffree;
1632 	fs->fs_cs(fs, cg).cs_nffree -= nffree;
1633 	fs->fs_fmod = 1;
1634 	ACTIVECLEAR(fs, cg);
1635 	UFS_UNLOCK(ump);
1636 	if (DOINGSOFTDEP(ITOV(ip)))
1637 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1638 		    frags, numfrags(fs, osize));
1639 	bdwrite(bp);
1640 	return (bprev);
1641 
1642 fail:
1643 	brelse(bp);
1644 	UFS_LOCK(ump);
1645 	return (0);
1647 }
1648 
1649 /*
1650  * Determine whether a block can be allocated.
1651  *
1652  * Check to see if a block of the appropriate size is available,
1653  * and if it is, allocate it.
1654  */
1655 static ufs2_daddr_t
1656 ffs_alloccg(ip, cg, bpref, size, rsize)
1657 	struct inode *ip;
1658 	u_int cg;
1659 	ufs2_daddr_t bpref;
1660 	int size;
1661 	int rsize;
1662 {
1663 	struct fs *fs;
1664 	struct cg *cgp;
1665 	struct buf *bp;
1666 	struct ufsmount *ump;
1667 	ufs1_daddr_t bno;
1668 	ufs2_daddr_t blkno;
1669 	int i, allocsiz, error, frags;
1670 	u_int8_t *blksfree;
1671 
1672 	ump = ITOUMP(ip);
1673 	fs = ump->um_fs;
1674 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1675 		return (0);
1676 	UFS_UNLOCK(ump);
1677 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0 ||
1678 	   (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1679 		goto fail;
1680 	if (size == fs->fs_bsize) {
1681 		UFS_LOCK(ump);
1682 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1683 		ACTIVECLEAR(fs, cg);
1684 		UFS_UNLOCK(ump);
1685 		bdwrite(bp);
1686 		return (blkno);
1687 	}
1688 	/*
1689 	 * Check to see if any fragments are already available.
1690 	 * allocsiz is the smallest free fragment run that can satisfy
1691 	 * the request; a longer run is split and the excess stays free.
1692 	 */
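	/*
	 * For illustration, assume fs_frag = 8 and a request for
	 * frags = 2.  If cg_frsum[2] == 0 but cg_frsum[3] > 0, the
	 * loop below settles on allocsiz = 3: two fragments of a free
	 * 3-fragment run are allocated, cg_frsum[3] is decremented,
	 * and the leftover fragment is credited to cg_frsum[1].
	 */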
1693 	blksfree = cg_blksfree(cgp);
1694 	frags = numfrags(fs, size);
1695 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1696 		if (cgp->cg_frsum[allocsiz] != 0)
1697 			break;
1698 	if (allocsiz == fs->fs_frag) {
1699 		/*
1700 		 * No fragments were available, so a full block will be
1701 		 * allocated and then broken up into fragments.
1702 		 */
1703 		if (cgp->cg_cs.cs_nbfree == 0)
1704 			goto fail;
1705 		UFS_LOCK(ump);
1706 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1707 		ACTIVECLEAR(fs, cg);
1708 		UFS_UNLOCK(ump);
1709 		bdwrite(bp);
1710 		return (blkno);
1711 	}
1712 	KASSERT(size == rsize,
1713 	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1714 	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1715 	if (bno < 0)
1716 		goto fail;
1717 	for (i = 0; i < frags; i++)
1718 		clrbit(blksfree, bno + i);
1719 	cgp->cg_cs.cs_nffree -= frags;
1720 	cgp->cg_frsum[allocsiz]--;
1721 	if (frags != allocsiz)
1722 		cgp->cg_frsum[allocsiz - frags]++;
1723 	UFS_LOCK(ump);
1724 	fs->fs_cstotal.cs_nffree -= frags;
1725 	fs->fs_cs(fs, cg).cs_nffree -= frags;
1726 	fs->fs_fmod = 1;
1727 	blkno = cgbase(fs, cg) + bno;
1728 	ACTIVECLEAR(fs, cg);
1729 	UFS_UNLOCK(ump);
1730 	if (DOINGSOFTDEP(ITOV(ip)))
1731 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1732 	bdwrite(bp);
1733 	return (blkno);
1734 
1735 fail:
1736 	brelse(bp);
1737 	UFS_LOCK(ump);
1738 	return (0);
1739 }
1740 
1741 /*
1742  * Allocate a block in a cylinder group.
1743  *
1744  * This algorithm implements the following policy:
1745  *   1) allocate the requested block.
1746  *   2) allocate a rotationally optimal block in the same cylinder.
1747  *   3) allocate the next available block on the block rotor for the
1748  *      specified cylinder group.
1749  * Note that this routine only allocates fs_bsize blocks; these
1750  * blocks may be fragmented by the routine that allocates them.
1751  */
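/*
 * For illustration of the preference handling below: a bpref of zero
 * falls back to this group's block rotor (the block after the last
 * one allocated here), while a bpref that lies in a different
 * cylinder group is first remapped into the matching zone (metadata
 * or data) of this group before the free map is consulted.
 */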
1752 static ufs2_daddr_t
1753 ffs_alloccgblk(ip, bp, bpref, size)
1754 	struct inode *ip;
1755 	struct buf *bp;
1756 	ufs2_daddr_t bpref;
1757 	int size;
1758 {
1759 	struct fs *fs;
1760 	struct cg *cgp;
1761 	struct ufsmount *ump;
1762 	ufs1_daddr_t bno;
1763 	ufs2_daddr_t blkno;
1764 	u_int8_t *blksfree;
1765 	int i, cgbpref;
1766 
1767 	ump = ITOUMP(ip);
1768 	fs = ump->um_fs;
1769 	mtx_assert(UFS_MTX(ump), MA_OWNED);
1770 	cgp = (struct cg *)bp->b_data;
1771 	blksfree = cg_blksfree(cgp);
1772 	if (bpref == 0) {
1773 		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1774 	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1775 		/* map bpref to correct zone in this cg */
1776 		if (bpref < cgdata(fs, cgbpref))
1777 			bpref = cgmeta(fs, cgp->cg_cgx);
1778 		else
1779 			bpref = cgdata(fs, cgp->cg_cgx);
1780 	}
1781 	/*
1782 	 * if the requested block is available, use it
1783 	 */
1784 	bno = dtogd(fs, blknum(fs, bpref));
1785 	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1786 		goto gotit;
1787 	/*
1788 	 * Take the next available block in this cylinder group.
1789 	 */
1790 	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1791 	if (bno < 0)
1792 		return (0);
1793 	/* Update cg_rotor only if allocated from the data zone */
1794 	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1795 		cgp->cg_rotor = bno;
1796 gotit:
1797 	blkno = fragstoblks(fs, bno);
1798 	ffs_clrblock(fs, blksfree, (long)blkno);
1799 	ffs_clusteracct(fs, cgp, blkno, -1);
1800 	cgp->cg_cs.cs_nbfree--;
1801 	fs->fs_cstotal.cs_nbfree--;
1802 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1803 	fs->fs_fmod = 1;
1804 	blkno = cgbase(fs, cgp->cg_cgx) + bno;
1805 	/*
1806 	 * If the caller didn't want the whole block, free the frags here.
1807 	 */
1808 	size = numfrags(fs, size);
1809 	if (size != fs->fs_frag) {
1810 		bno = dtogd(fs, blkno);
1811 		for (i = size; i < fs->fs_frag; i++)
1812 			setbit(blksfree, bno + i);
1813 		i = fs->fs_frag - size;
1814 		cgp->cg_cs.cs_nffree += i;
1815 		fs->fs_cstotal.cs_nffree += i;
1816 		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1817 		fs->fs_fmod = 1;
1818 		cgp->cg_frsum[i]++;
1819 	}
1820 	/* XXX Fixme. */
1821 	UFS_UNLOCK(ump);
1822 	if (DOINGSOFTDEP(ITOV(ip)))
1823 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
1824 		    size, 0);
1825 	UFS_LOCK(ump);
1826 	return (blkno);
1827 }
1828 
1829 /*
1830  * Determine whether a cluster can be allocated.
1831  *
1832  * We do not currently check for optimal rotational layout if there
1833  * are multiple choices in the same cylinder group. Instead we just
1834  * take the first one that we find following bpref.
1835  */
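/*
 * For illustration, assume a request for len = 3 contiguous blocks.
 * The cg_clustersum[] check below succeeds if the group records any
 * free run of 3 or more blocks; the cluster bitmap is then scanned
 * one bit per block, counting the current run length and stopping at
 * the first run of 3 free blocks at or after bpref.
 */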
1836 static ufs2_daddr_t
1837 ffs_clusteralloc(ip, cg, bpref, len)
1838 	struct inode *ip;
1839 	u_int cg;
1840 	ufs2_daddr_t bpref;
1841 	int len;
1842 {
1843 	struct fs *fs;
1844 	struct cg *cgp;
1845 	struct buf *bp;
1846 	struct ufsmount *ump;
1847 	int i, run, bit, map, got, error;
1848 	ufs2_daddr_t bno;
1849 	u_char *mapp;
1850 	int32_t *lp;
1851 	u_int8_t *blksfree;
1852 
1853 	ump = ITOUMP(ip);
1854 	fs = ump->um_fs;
1855 	if (fs->fs_maxcluster[cg] < len)
1856 		return (0);
1857 	UFS_UNLOCK(ump);
1858 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
1859 		UFS_LOCK(ump);
1860 		return (0);
1861 	}
1862 	/*
1863 	 * Check to see if a cluster of the needed size (or bigger) is
1864 	 * available in this cylinder group.
1865 	 */
1866 	lp = &cg_clustersum(cgp)[len];
1867 	for (i = len; i <= fs->fs_contigsumsize; i++)
1868 		if (*lp++ > 0)
1869 			break;
1870 	if (i > fs->fs_contigsumsize) {
1871 		/*
1872 		 * This is the first time looking for a cluster in this
1873 		 * cylinder group. Update the cluster summary information
1874 		 * to reflect the true maximum sized cluster so that
1875 		 * future cluster allocation requests can avoid reading
1876 		 * the cylinder group map only to find no clusters.
1877 		 */
1878 		lp = &cg_clustersum(cgp)[len - 1];
1879 		for (i = len - 1; i > 0; i--)
1880 			if (*lp-- > 0)
1881 				break;
1882 		UFS_LOCK(ump);
1883 		fs->fs_maxcluster[cg] = i;
1884 		brelse(bp);
1885 		return (0);
1886 	}
1887 	/*
1888 	 * Search the cluster map to find a big enough cluster.
1889 	 * We take the first one that we find, even if it is larger
1890 	 * than we need as we prefer to get one close to the previous
1891 	 * block allocation. We do not search before the current
1892 	 * preference point as we do not want to allocate a block
1893 	 * that is allocated before the previous one (as we will
1894 	 * then have to wait for another pass of the elevator
1895 	 * algorithm before it will be read). We prefer to fail and
1896 	 * be recalled to try an allocation in the next cylinder group.
1897 	 */
1898 	if (dtog(fs, bpref) != cg)
1899 		bpref = cgdata(fs, cg);
1900 	else
1901 		bpref = blknum(fs, bpref);
1902 	bpref = fragstoblks(fs, dtogd(fs, bpref));
1903 	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1904 	map = *mapp++;
1905 	bit = 1 << (bpref % NBBY);
1906 	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1907 		if ((map & bit) == 0) {
1908 			run = 0;
1909 		} else {
1910 			run++;
1911 			if (run == len)
1912 				break;
1913 		}
1914 		if ((got & (NBBY - 1)) != (NBBY - 1)) {
1915 			bit <<= 1;
1916 		} else {
1917 			map = *mapp++;
1918 			bit = 1;
1919 		}
1920 	}
1921 	if (got >= cgp->cg_nclusterblks) {
1922 		UFS_LOCK(ump);
1923 		brelse(bp);
1924 		return (0);
1925 	}
1926 	/*
1927 	 * Allocate the cluster that we have found.
1928 	 */
1929 	blksfree = cg_blksfree(cgp);
1930 	for (i = 1; i <= len; i++)
1931 		if (!ffs_isblock(fs, blksfree, got - run + i))
1932 			panic("ffs_clusteralloc: map mismatch");
1933 	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1934 	if (dtog(fs, bno) != cg)
1935 		panic("ffs_clusteralloc: allocated out of group");
1936 	len = blkstofrags(fs, len);
1937 	UFS_LOCK(ump);
1938 	for (i = 0; i < len; i += fs->fs_frag)
1939 		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1940 			panic("ffs_clusteralloc: lost block");
1941 	ACTIVECLEAR(fs, cg);
1942 	UFS_UNLOCK(ump);
1943 	bdwrite(bp);
1944 	return (bno);
1945 }
1946 
1947 static inline struct buf *
1948 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1949 {
1950 	struct fs *fs;
1951 
1952 	fs = ITOFS(ip);
1953 	return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
1954 	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
1955 	    gbflags));
1956 }
1957 
1958 /*
1959  * Determine whether an inode can be allocated.
1960  *
1961  * Check to see if an inode is available, and if it is,
1962  * allocate it using the following policy:
1963  *   1) allocate the requested inode.
1964  *   2) allocate the next available inode after the requested
1965  *      inode in the specified cylinder group.
1966  */
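/*
 * For illustration of the free-map scan below: inosused has one bit
 * set per allocated inode, so memcchr() finds the first byte that is
 * not 0xff.  If that byte is, say, 0xdf (binary 11011111), then
 * ffs(~0xdf) == ffs(0x20) == 6, and the free inode is bit 5 of that
 * byte: ipref = (loc - inosused) * NBBY + 5.
 */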
1967 static ufs2_daddr_t
1968 ffs_nodealloccg(ip, cg, ipref, mode, unused)
1969 	struct inode *ip;
1970 	u_int cg;
1971 	ufs2_daddr_t ipref;
1972 	int mode;
1973 	int unused;
1974 {
1975 	struct fs *fs;
1976 	struct cg *cgp;
1977 	struct buf *bp, *ibp;
1978 	struct ufsmount *ump;
1979 	u_int8_t *inosused, *loc;
1980 	struct ufs2_dinode *dp2;
1981 	int error, start, len, i;
1982 	u_int32_t old_initediblk;
1983 
1984 	ump = ITOUMP(ip);
1985 	fs = ump->um_fs;
1986 check_nifree:
1987 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
1988 		return (0);
1989 	UFS_UNLOCK(ump);
1990 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
1991 		UFS_LOCK(ump);
1992 		return (0);
1993 	}
1994 restart:
1995 	if (cgp->cg_cs.cs_nifree == 0) {
1996 		brelse(bp);
1997 		UFS_LOCK(ump);
1998 		return (0);
1999 	}
2000 	inosused = cg_inosused(cgp);
2001 	if (ipref) {
2002 		ipref %= fs->fs_ipg;
2003 		if (isclr(inosused, ipref))
2004 			goto gotit;
2005 	}
2006 	start = cgp->cg_irotor / NBBY;
2007 	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2008 	loc = memcchr(&inosused[start], 0xff, len);
2009 	if (loc == NULL) {
2010 		len = start + 1;
2011 		start = 0;
2012 		loc = memcchr(&inosused[start], 0xff, len);
2013 		if (loc == NULL) {
2014 			printf("cg = %u, irotor = %ld, fs = %s\n",
2015 			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2016 			panic("ffs_nodealloccg: map corrupted");
2017 			/* NOTREACHED */
2018 		}
2019 	}
2020 	ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2021 gotit:
2022 	/*
2023 	 * Check to see if we need to initialize more inodes.
2024 	 */
2025 	if (fs->fs_magic == FS_UFS2_MAGIC &&
2026 	    ipref + INOPB(fs) > cgp->cg_initediblk &&
2027 	    cgp->cg_initediblk < cgp->cg_niblk) {
2028 		old_initediblk = cgp->cg_initediblk;
2029 
2030 		/*
2031 		 * Release the cylinder group buffer lock before
2032 		 * writing the initialized inode block.  Entering
2033 		 * babarrierwrite() while holding the cylinder group
2034 		 * lock would cause a lock order violation between
2035 		 * that lock and snaplk.
2036 		 *
2037 		 * Another thread can decide to initialize the same
2038 		 * inode block.  Whichever thread first reacquires the
2039 		 * cylinder group lock after writing the newly
2040 		 * allocated inode block updates cg_initediblk; the
2041 		 * other realizes that it has lost the race and leaves
2042 		 * the cylinder group unchanged.
2043 		 */
2044 		ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2045 		brelse(bp);
2046 		if (ibp == NULL) {
2047 			/*
2048 			 * The inode block buffer is already owned by
2049 			 * another thread, which must initialize it.
2050 			 * Wait on the buffer (with the cg buffer lock
2051 			 * dropped) to allow the other thread to finish
2052 			 * its updates, then retry.
2053 			 */
2054 			ibp = getinobuf(ip, cg, old_initediblk, 0);
2055 			brelse(ibp);
2056 			UFS_LOCK(ump);
2057 			goto check_nifree;
2058 		}
2059 		bzero(ibp->b_data, (int)fs->fs_bsize);
2060 		dp2 = (struct ufs2_dinode *)(ibp->b_data);
2061 		for (i = 0; i < INOPB(fs); i++) {
2062 			while (dp2->di_gen == 0)
2063 				dp2->di_gen = arc4random();
2064 			dp2++;
2065 		}
2066 		/*
2067 		 * Rather than adding a soft updates dependency to ensure
2068 		 * that the new inode block is written before it is claimed
2069 		 * by the cylinder group map, we just do a barrier write
2070 		 * here. The barrier write will ensure that the inode block
2071 		 * gets written before the updated cylinder group map can be
2072 		 * written. The barrier write should only slow down bulk
2073 		 * loading of newly created filesystems.
2074 		 */
2075 		babarrierwrite(ibp);
2076 
2077 		/*
2078 		 * After the inode block is written, try to update the
2079 		 * cg initediblk pointer.  If another thread beat us
2080 		 * to it, then leave it unchanged as the other thread
2081 		 * has already set it correctly.
2082 		 */
2083 		error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp);
2084 		UFS_LOCK(ump);
2085 		ACTIVECLEAR(fs, cg);
2086 		UFS_UNLOCK(ump);
2087 		if (error != 0)
2088 			return (0);	/* failure: callers expect 0, not an errno */
2089 		if (cgp->cg_initediblk == old_initediblk)
2090 			cgp->cg_initediblk += INOPB(fs);
2091 		goto restart;
2092 	}
2093 	cgp->cg_irotor = ipref;
2094 	UFS_LOCK(ump);
2095 	ACTIVECLEAR(fs, cg);
2096 	setbit(inosused, ipref);
2097 	cgp->cg_cs.cs_nifree--;
2098 	fs->fs_cstotal.cs_nifree--;
2099 	fs->fs_cs(fs, cg).cs_nifree--;
2100 	fs->fs_fmod = 1;
2101 	if ((mode & IFMT) == IFDIR) {
2102 		cgp->cg_cs.cs_ndir++;
2103 		fs->fs_cstotal.cs_ndir++;
2104 		fs->fs_cs(fs, cg).cs_ndir++;
2105 	}
2106 	UFS_UNLOCK(ump);
2107 	if (DOINGSOFTDEP(ITOV(ip)))
2108 		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2109 	bdwrite(bp);
2110 	return ((ino_t)(cg * fs->fs_ipg + ipref));
2111 }
2112 
2113 /*
2114  * Free a block or fragment.
2115  *
2116  * The specified block or fragment is placed back in the
2117  * free map. If a fragment is deallocated, a check is made to
2118  * see whether a full block can be reassembled from it.
2119  */
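/*
 * For illustration of the reassembly case, assume fs_frag = 8 and
 * that freeing a 2-fragment piece leaves all 8 fragments of its
 * block free.  The code below first credits the 2 fragments to the
 * free fragment counts, then notices via ffs_isblock() that the
 * whole block is now free, deducts all 8 fragments again, and
 * credits one full free block (updating cluster accounting as well).
 */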
2120 static void
2121 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2122 	struct ufsmount *ump;
2123 	struct fs *fs;
2124 	struct vnode *devvp;
2125 	ufs2_daddr_t bno;
2126 	long size;
2127 	ino_t inum;
2128 	struct workhead *dephd;
2129 {
2130 	struct mount *mp;
2131 	struct cg *cgp;
2132 	struct buf *bp;
2133 	ufs1_daddr_t fragno, cgbno;
2134 	int i, blk, frags, bbase, error;
2135 	u_int cg;
2136 	u_int8_t *blksfree;
2137 	struct cdev *dev;
2138 
2139 	cg = dtog(fs, bno);
2140 	if (devvp->v_type == VREG) {
2141 		/* devvp is a snapshot */
2142 		MPASS(devvp->v_mount->mnt_data == ump);
2143 		dev = ump->um_devvp->v_rdev;
2144 	} else if (devvp->v_type == VCHR) {
2145 		/* devvp is a normal disk device */
2146 		dev = devvp->v_rdev;
2147 		ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2148 	} else
2149 		return;
2150 #ifdef INVARIANTS
2151 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2152 	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2153 		printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2154 		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2155 		    size, fs->fs_fsmnt);
2156 		panic("ffs_blkfree_cg: bad size");
2157 	}
2158 #endif
2159 	if ((u_int)bno >= fs->fs_size) {
2160 		printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2161 		    (u_long)inum);
2162 		ffs_fserr(fs, inum, "bad block");
2163 		return;
2164 	}
2165 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2166 		return;
2167 	cgbno = dtogd(fs, bno);
2168 	blksfree = cg_blksfree(cgp);
2169 	UFS_LOCK(ump);
2170 	if (size == fs->fs_bsize) {
2171 		fragno = fragstoblks(fs, cgbno);
2172 		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2173 			if (devvp->v_type == VREG) {
2174 				UFS_UNLOCK(ump);
2175 				/* devvp is a snapshot */
2176 				brelse(bp);
2177 				return;
2178 			}
2179 			printf("dev = %s, block = %jd, fs = %s\n",
2180 			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2181 			panic("ffs_blkfree_cg: freeing free block");
2182 		}
2183 		ffs_setblock(fs, blksfree, fragno);
2184 		ffs_clusteracct(fs, cgp, fragno, 1);
2185 		cgp->cg_cs.cs_nbfree++;
2186 		fs->fs_cstotal.cs_nbfree++;
2187 		fs->fs_cs(fs, cg).cs_nbfree++;
2188 	} else {
2189 		bbase = cgbno - fragnum(fs, cgbno);
2190 		/*
2191 		 * decrement the counts associated with the old frags
2192 		 */
2193 		blk = blkmap(fs, blksfree, bbase);
2194 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2195 		/*
2196 		 * deallocate the fragment
2197 		 */
2198 		frags = numfrags(fs, size);
2199 		for (i = 0; i < frags; i++) {
2200 			if (isset(blksfree, cgbno + i)) {
2201 				printf("dev = %s, block = %jd, fs = %s\n",
2202 				    devtoname(dev), (intmax_t)(bno + i),
2203 				    fs->fs_fsmnt);
2204 				panic("ffs_blkfree_cg: freeing free frag");
2205 			}
2206 			setbit(blksfree, cgbno + i);
2207 		}
2208 		cgp->cg_cs.cs_nffree += i;	/* i == frags after the loop */
2209 		fs->fs_cstotal.cs_nffree += i;
2210 		fs->fs_cs(fs, cg).cs_nffree += i;
2211 		/*
2212 		 * add back in counts associated with the new frags
2213 		 */
2214 		blk = blkmap(fs, blksfree, bbase);
2215 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2216 		/*
2217 		 * if a complete block has been reassembled, account for it
2218 		 */
2219 		fragno = fragstoblks(fs, bbase);
2220 		if (ffs_isblock(fs, blksfree, fragno)) {
2221 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
2222 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2223 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2224 			ffs_clusteracct(fs, cgp, fragno, 1);
2225 			cgp->cg_cs.cs_nbfree++;
2226 			fs->fs_cstotal.cs_nbfree++;
2227 			fs->fs_cs(fs, cg).cs_nbfree++;
2228 		}
2229 	}
2230 	fs->fs_fmod = 1;
2231 	ACTIVECLEAR(fs, cg);
2232 	UFS_UNLOCK(ump);
2233 	mp = UFSTOVFS(ump);
2234 	if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
2235 		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2236 		    numfrags(fs, size), dephd);
2237 	bdwrite(bp);
2238 }
2239 
2240 struct ffs_blkfree_trim_params {
2241 	struct task task;
2242 	struct ufsmount *ump;
2243 	struct vnode *devvp;
2244 	ufs2_daddr_t bno;
2245 	long size;
2246 	ino_t inum;
2247 	struct workhead *pdephd;
2248 	struct workhead dephd;
2249 };
2250 
2251 static void
2252 ffs_blkfree_trim_task(ctx, pending)
2253 	void *ctx;
2254 	int pending;
2255 {
2256 	struct ffs_blkfree_trim_params *tp;
2257 
2258 	tp = ctx;
2259 	ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2260 	    tp->inum, tp->pdephd);
2261 	vn_finished_secondary_write(UFSTOVFS(tp->ump));
2262 	atomic_add_int(&tp->ump->um_trim_inflight, -1);
2263 	free(tp, M_TEMP);
2264 }
2265 
2266 static void
2267 ffs_blkfree_trim_completed(bip)
2268 	struct bio *bip;
2269 {
2270 	struct ffs_blkfree_trim_params *tp;
2271 
2272 	tp = bip->bio_caller2;
2273 	g_destroy_bio(bip);
2274 	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2275 	taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
2276 }
2277 
2278 void
2279 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
2280 	struct ufsmount *ump;
2281 	struct fs *fs;
2282 	struct vnode *devvp;
2283 	ufs2_daddr_t bno;
2284 	long size;
2285 	ino_t inum;
2286 	enum vtype vtype;
2287 	struct workhead *dephd;
2288 {
2289 	struct mount *mp;
2290 	struct bio *bip;
2291 	struct ffs_blkfree_trim_params *tp;
2292 
2293 	/*
2294 	 * Check to see if a snapshot wants to claim the block: devvp
2295 	 * must be a normal disk device (not itself a snapshot), must
2296 	 * have one or more snapshots associated with it, and one of
2297 	 * those snapshots must want to claim the block.
2298 	 */
2299 	if (devvp->v_type == VCHR &&
2300 	    (devvp->v_vflag & VV_COPYONWRITE) &&
2301 	    ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2302 		return;
2303 	}
2304 	/*
2305 	 * Nothing to delay if TRIM is disabled, or the operation is
2306 	 * performed on a snapshot.
2307 	 */
2308 	if (!ump->um_candelete || devvp->v_type == VREG) {
2309 		ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2310 		return;
2311 	}
2312 
2313 	/*
2314 	 * Postpone setting the free bit in the cg bitmap until the
2315 	 * BIO_DELETE is completed.  Otherwise, due to disk queue
2316 	 * reordering, TRIM might be issued after we reuse the block
2317 	 * and write some new data into it.
2318 	 */
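	/*
	 * The deferred path below works as follows: a BIO_DELETE is
	 * queued for the block; on completion,
	 * ffs_blkfree_trim_completed() hands the saved parameters to a
	 * taskqueue, and ffs_blkfree_trim_task() finally calls
	 * ffs_blkfree_cg() to mark the block free in the bitmap.
	 */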
2319 	atomic_add_int(&ump->um_trim_inflight, 1);
2320 	tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
2321 	tp->ump = ump;
2322 	tp->devvp = devvp;
2323 	tp->bno = bno;
2324 	tp->size = size;
2325 	tp->inum = inum;
2326 	if (dephd != NULL) {
2327 		LIST_INIT(&tp->dephd);
2328 		LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2329 		tp->pdephd = &tp->dephd;
2330 	} else
2331 		tp->pdephd = NULL;
2332 
2333 	bip = g_alloc_bio();
2334 	bip->bio_cmd = BIO_DELETE;
2335 	bip->bio_offset = dbtob(fsbtodb(fs, bno));
2336 	bip->bio_done = ffs_blkfree_trim_completed;
2337 	bip->bio_length = size;
2338 	bip->bio_caller2 = tp;
2339 
2340 	mp = UFSTOVFS(ump);
2341 	vn_start_secondary_write(NULL, &mp, 0);
2342 	g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
2343 }
2344 
2345 #ifdef INVARIANTS
2346 /*
2347  * Verify allocation of a block or fragment. Returns true if block or
2348  * fragment is allocated, false if it is free.
2349  */
2350 static int
2351 ffs_checkblk(ip, bno, size)
2352 	struct inode *ip;
2353 	ufs2_daddr_t bno;
2354 	long size;
2355 {
2356 	struct fs *fs;
2357 	struct cg *cgp;
2358 	struct buf *bp;
2359 	ufs1_daddr_t cgbno;
2360 	int i, error, frags, free;
2361 	u_int8_t *blksfree;
2362 
2363 	fs = ITOFS(ip);
2364 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2365 		printf("bsize = %ld, size = %ld, fs = %s\n",
2366 		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
2367 		panic("ffs_checkblk: bad size");
2368 	}
2369 	if ((u_int)bno >= fs->fs_size)
2370 		panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2371 	error = ffs_getcg(fs, ITODEVVP(ip), dtog(fs, bno), &bp, &cgp);
2372 	if (error)
2373 		panic("ffs_checkblk: cylinder group read failed");
2374 	blksfree = cg_blksfree(cgp);
2375 	cgbno = dtogd(fs, bno);
2376 	if (size == fs->fs_bsize) {
2377 		free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2378 	} else {
2379 		frags = numfrags(fs, size);
2380 		for (free = 0, i = 0; i < frags; i++)
2381 			if (isset(blksfree, cgbno + i))
2382 				free++;
2383 		if (free != 0 && free != frags)
2384 			panic("ffs_checkblk: partially free fragment");
2385 	}
2386 	brelse(bp);
2387 	return (!free);
2388 }
2389 #endif /* INVARIANTS */
2390 
2391 /*
2392  * Free an inode.
2393  */
2394 int
2395 ffs_vfree(pvp, ino, mode)
2396 	struct vnode *pvp;
2397 	ino_t ino;
2398 	int mode;
2399 {
2400 	struct ufsmount *ump;
2401 	struct inode *ip;
2402 
2403 	if (DOINGSOFTDEP(pvp)) {
2404 		softdep_freefile(pvp, ino, mode);
2405 		return (0);
2406 	}
2407 	ip = VTOI(pvp);
2408 	ump = VFSTOUFS(pvp->v_mount);
2409 	return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
2410 }
2411 
2412 /*
2413  * Do the actual free operation.
2414  * The specified inode is placed back in the free map.
2415  */
2416 int
2417 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2418 	struct ufsmount *ump;
2419 	struct fs *fs;
2420 	struct vnode *devvp;
2421 	ino_t ino;
2422 	int mode;
2423 	struct workhead *wkhd;
2424 {
2425 	struct cg *cgp;
2426 	struct buf *bp;
2427 	ufs2_daddr_t cgbno;
2428 	int error;
2429 	u_int cg;
2430 	u_int8_t *inosused;
2431 	struct cdev *dev;
2432 
2433 	cg = ino_to_cg(fs, ino);
2434 	if (devvp->v_type == VREG) {
2435 		/* devvp is a snapshot */
2436 		MPASS(devvp->v_mount->mnt_data == ump);
2437 		dev = ump->um_devvp->v_rdev;
2438 		cgbno = fragstoblks(fs, cgtod(fs, cg));
2439 	} else if (devvp->v_type == VCHR) {
2440 		/* devvp is a normal disk device */
2441 		dev = devvp->v_rdev;
2442 		cgbno = fsbtodb(fs, cgtod(fs, cg));
2443 	} else {
2444 		bp = NULL;
2445 		return (0);
2446 	}
2447 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2448 		panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2449 		    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2450 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2451 		return (error);
2452 	inosused = cg_inosused(cgp);
2453 	ino %= fs->fs_ipg;
2454 	if (isclr(inosused, ino)) {
2455 		printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2456 		    (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
2457 		if (fs->fs_ronly == 0)
2458 			panic("ffs_freefile: freeing free inode");
2459 	}
2460 	clrbit(inosused, ino);
2461 	if (ino < cgp->cg_irotor)
2462 		cgp->cg_irotor = ino;
2463 	cgp->cg_cs.cs_nifree++;
2464 	UFS_LOCK(ump);
2465 	fs->fs_cstotal.cs_nifree++;
2466 	fs->fs_cs(fs, cg).cs_nifree++;
2467 	if ((mode & IFMT) == IFDIR) {
2468 		cgp->cg_cs.cs_ndir--;
2469 		fs->fs_cstotal.cs_ndir--;
2470 		fs->fs_cs(fs, cg).cs_ndir--;
2471 	}
2472 	fs->fs_fmod = 1;
2473 	ACTIVECLEAR(fs, cg);
2474 	UFS_UNLOCK(ump);
2475 	if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
2476 		softdep_setup_inofree(UFSTOVFS(ump), bp,
2477 		    ino + cg * fs->fs_ipg, wkhd);
2478 	bdwrite(bp);
2479 	return (0);
2480 }
2481 
2482 /*
2483  * Check to see if a file is free.
2484  * Used to check for allocated files in snapshots.
2485  */
2486 int
2487 ffs_checkfreefile(fs, devvp, ino)
2488 	struct fs *fs;
2489 	struct vnode *devvp;
2490 	ino_t ino;
2491 {
2492 	struct cg *cgp;
2493 	struct buf *bp;
2494 	ufs2_daddr_t cgbno;
2495 	int ret, error;
2496 	u_int cg;
2497 	u_int8_t *inosused;
2498 
2499 	cg = ino_to_cg(fs, ino);
2500 	if (devvp->v_type == VREG) {
2501 		/* devvp is a snapshot */
2502 		cgbno = fragstoblks(fs, cgtod(fs, cg));
2503 	} else if (devvp->v_type == VCHR) {
2504 		/* devvp is a normal disk device */
2505 		cgbno = fsbtodb(fs, cgtod(fs, cg));
2506 	} else {
2507 		return (1);
2508 	}
2509 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2510 		return (1);
2511 	if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2512 		return (1);
2513 	inosused = cg_inosused(cgp);
2514 	ino %= fs->fs_ipg;
2515 	ret = isclr(inosused, ino);
2516 	brelse(bp);
2517 	return (ret);
2518 }
2519 
2520 /*
2521  * Find a block of the specified size in the specified cylinder group.
2522  *
2523  * It is a panic if a request is made to find a block when none
2524  * are available.
2525  */
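/*
 * For illustration of the two-level search below: scanc() first
 * locates a byte of the free map containing a free run of at least
 * allocsiz fragments (using the precomputed fragtbl tables), scanning
 * from the rotor to the end of the group and then wrapping around;
 * the inner loop then slides an allocsiz-wide mask (around[] and
 * inside[]) across that region one fragment at a time to find the
 * exact starting position of the run.
 */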
2526 static ufs1_daddr_t
2527 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2528 	struct fs *fs;
2529 	struct cg *cgp;
2530 	ufs2_daddr_t bpref;
2531 	int allocsiz;
2532 {
2533 	ufs1_daddr_t bno;
2534 	int start, len, loc, i;
2535 	int blk, field, subfield, pos;
2536 	u_int8_t *blksfree;
2537 
2538 	/*
2539 	 * find the fragment by searching through the free block
2540 	 * map for an appropriate bit pattern
2541 	 */
2542 	if (bpref)
2543 		start = dtogd(fs, bpref) / NBBY;
2544 	else
2545 		start = cgp->cg_frotor / NBBY;
2546 	blksfree = cg_blksfree(cgp);
2547 	len = howmany(fs->fs_fpg, NBBY) - start;
2548 	loc = scanc((u_int)len, (u_char *)&blksfree[start],
2549 		fragtbl[fs->fs_frag],
2550 		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2551 	if (loc == 0) {
2552 		len = start + 1;
2553 		start = 0;
2554 		loc = scanc((u_int)len, (u_char *)&blksfree[0],
2555 			fragtbl[fs->fs_frag],
2556 			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2557 		if (loc == 0) {
2558 			printf("start = %d, len = %d, fs = %s\n",
2559 			    start, len, fs->fs_fsmnt);
2560 			panic("ffs_mapsearch: map corrupted");
2561 			/* NOTREACHED */
2562 		}
2563 	}
2564 	bno = (start + len - loc) * NBBY;
2565 	cgp->cg_frotor = bno;
2566 	/*
2567 	 * found the byte in the map
2568 	 * sift through the bits to find the selected frag
2569 	 */
2570 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2571 		blk = blkmap(fs, blksfree, bno);
2572 		blk <<= 1;
2573 		field = around[allocsiz];
2574 		subfield = inside[allocsiz];
2575 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2576 			if ((blk & field) == subfield)
2577 				return (bno + pos);
2578 			field <<= 1;
2579 			subfield <<= 1;
2580 		}
2581 	}
2582 	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2583 	panic("ffs_mapsearch: block not in map");
2584 	return (-1);
2585 }
2586 
2587 static const struct statfs *
2588 ffs_getmntstat(struct vnode *devvp)
2589 {
2590 
2591 	if (devvp->v_type == VCHR)
2592 		return (&devvp->v_rdev->si_mountpt->mnt_stat);
2593 	return (ffs_getmntstat(VFSTOUFS(devvp->v_mount)->um_devvp));
2594 }
2595 
2596 /*
2597  * Fetch and verify a cylinder group.
2598  */
2599 int
2600 ffs_getcg(fs, devvp, cg, bpp, cgpp)
2601 	struct fs *fs;
2602 	struct vnode *devvp;
2603 	u_int cg;
2604 	struct buf **bpp;
2605 	struct cg **cgpp;
2606 {
2607 	struct buf *bp;
2608 	struct cg *cgp;
2609 	const struct statfs *sfs;
2610 	int flags, error;
2611 
2612 	*bpp = NULL;
2613 	*cgpp = NULL;
2614 	flags = 0;
2615 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2616 		flags |= GB_CKHASH;
2617 	error = breadn_flags(devvp, devvp->v_type == VREG ?
2618 	    fragstoblks(fs, cgtod(fs, cg)) : fsbtodb(fs, cgtod(fs, cg)),
2619 	    (int)fs->fs_cgsize, NULL, NULL, 0, NOCRED, flags,
2620 	    ffs_ckhash_cg, &bp);
2621 	if (error != 0)
2622 		return (error);
2623 	cgp = (struct cg *)bp->b_data;
2624 	if (((fs->fs_metackhash & CK_CYLGRP) != 0 &&
2625 	    (bp->b_flags & B_CKHASH) != 0 &&
2626 	    cgp->cg_ckhash != bp->b_ckhash) ||
2627 	    !cg_chkmagic(cgp) || cgp->cg_cgx != cg) {
2628 		sfs = ffs_getmntstat(devvp);
2629 		printf("UFS %s%s (%s) cylinder group checksum failed: cg %u, cgp: "
2630 		    "0x%x != bp: 0x%jx\n",
2631 		    devvp->v_type == VCHR ? "" : "snapshot of ",
2632 		    sfs->f_mntfromname, sfs->f_mntonname,
2633 		    cg, cgp->cg_ckhash, (uintmax_t)bp->b_ckhash);
2634 		bp->b_flags &= ~B_CKHASH;
2635 		bp->b_flags |= B_INVAL | B_NOCACHE;
2636 		brelse(bp);
2637 		return (EIO);
2638 	}
2639 	bp->b_flags &= ~B_CKHASH;
2640 	bp->b_xflags |= BX_BKGRDWRITE;
2641 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2642 		bp->b_xflags |= BX_CYLGRP;
2643 	cgp->cg_old_time = cgp->cg_time = time_second;
2644 	*bpp = bp;
2645 	*cgpp = cgp;
2646 	return (0);
2647 }
2648 
2649 static void
2650 ffs_ckhash_cg(bp)
2651 	struct buf *bp;
2652 {
2653 	uint32_t ckhash;
2654 	struct cg *cgp;
2655 
2656 	cgp = (struct cg *)bp->b_data;
2657 	ckhash = cgp->cg_ckhash;
2658 	cgp->cg_ckhash = 0;
2659 	bp->b_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2660 	cgp->cg_ckhash = ckhash;
2661 }
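/*
 * Note that cg_ckhash is itself part of the buffer being hashed, so
 * it is zeroed for the duration of the CRC32C computation above and
 * then restored; ffs_getcg() compares the stored cg_ckhash against
 * the freshly computed b_ckhash to detect on-disk corruption.
 */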
2662 
2663 /*
2664  * Fserr prints the name of a filesystem with an error diagnostic.
2665  *
2666  * The form of the error message is:
2667  *	fs: error message
2668  */
2669 void
2670 ffs_fserr(fs, inum, cp)
2671 	struct fs *fs;
2672 	ino_t inum;
2673 	char *cp;
2674 {
2675 	struct thread *td = curthread;	/* XXX */
2676 	struct proc *p = td->td_proc;
2677 
2678 	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
2679 	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
2680 	    fs->fs_fsmnt, cp);
2681 }
2682 
2683 /*
2684  * This function provides the capability for the fsck program to
2685  * update an active filesystem. Fourteen operations are provided:
2686  *
2687  * adjrefcnt(inode, amt) - adjusts the reference count on the
2688  *	specified inode by the specified amount. Under normal
2689  *	operation the count should always go down. Decrementing
2690  *	the count to zero will cause the inode to be freed.
2691  * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2692  *	inode by the specified amount.
2693  * adjndir, adjnbfree, adjnifree, adjnffree, adjnumclusters(amt) -
2694  *	adjust the superblock summary.
2695  * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2696  *	are marked as free. Inodes should never have to be marked
2697  *	as in use.
2698  * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2699  *	are marked as free. Inodes should never have to be marked
2700  *	as in use.
2701  * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2702  *	are marked as free. Blocks should never have to be marked
2703  *	as in use.
2704  * setflags(flags, set/clear) - the fs_flags field has the specified
2705  *	flags set (second parameter +1) or cleared (second parameter -1).
2706  * setcwd(dirinode) - set the current directory to dirinode in the
2707  *	filesystem associated with the snapshot.
2708  * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2709  *	in the current directory is oldvalue then change it to newvalue.
2710  * unlink(nameptr, oldvalue) - Verify that the inode number associated
2711  *	with nameptr in the current directory is oldvalue then unlink it.
2712  *
2713  * The following functions may only be used on a quiescent filesystem
2714  * by the soft updates journal. They are not safe to be run on an active
2715  * filesystem.
2716  *
2717  * setinode(inode, dip) - the specified disk inode is replaced with the
2718  *	contents pointed to by dip.
2719  * setbufoutput(fd, flags) - output associated with the specified file
2720  *	descriptor (which must reference the character device supporting
2721  *	the filesystem) switches from using physio to running through the
2722  *	buffer cache when flags is set to 1. The descriptor reverts to
2723  *	physio for output when flags is set to zero.
2724  */
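/*
 * A minimal userland sketch of how fsck(8) might invoke one of these
 * operations (a hypothetical example: it assumes the struct fsck_cmd
 * layout and FFS_CMD_VERSION from <ufs/ffs/fs.h>, and omits error
 * handling):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <ufs/ffs/fs.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	static int
 *	adjust_link_count(const char *path_in_fs, int64_t ino,
 *	    int64_t delta)
 *	{
 *		struct fsck_cmd cmd;
 *
 *		memset(&cmd, 0, sizeof(cmd));
 *		cmd.version = FFS_CMD_VERSION;
 *		cmd.handle = open(path_in_fs, O_RDONLY);
 *		cmd.value = ino;	/* inode whose count is adjusted */
 *		cmd.size = delta;	/* signed adjustment */
 *		return (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL,
 *		    &cmd, sizeof(cmd)));
 *	}
 */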
2725 
2726 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2727 
2728 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2729 	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2730 
2731 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2732 	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2733 
2734 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2735 	sysctl_ffs_fsck, "Adjust number of directories");
2736 
2737 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2738 	sysctl_ffs_fsck, "Adjust number of free blocks");
2739 
2740 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2741 	sysctl_ffs_fsck, "Adjust number of free inodes");
2742 
2743 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2744 	sysctl_ffs_fsck, "Adjust number of free frags");
2745 
2746 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2747 	sysctl_ffs_fsck, "Adjust number of free clusters");
2748 
2749 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2750 	sysctl_ffs_fsck, "Free Range of Directory Inodes");
2751 
2752 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2753 	sysctl_ffs_fsck, "Free Range of File Inodes");
2754 
2755 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2756 	sysctl_ffs_fsck, "Free Range of Blocks");
2757 
2758 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2759 	sysctl_ffs_fsck, "Change Filesystem Flags");
2760 
2761 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2762 	sysctl_ffs_fsck, "Set Current Working Directory");
2763 
2764 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2765 	sysctl_ffs_fsck, "Change Value of .. Entry");
2766 
2767 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2768 	sysctl_ffs_fsck, "Unlink a Duplicate Name");
2769 
2770 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2771 	sysctl_ffs_fsck, "Update an On-Disk Inode");
2772 
2773 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2774 	sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2775 
2776 #define DEBUG 1
2777 #ifdef DEBUG
2778 static int fsckcmds = 0;
2779 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2780 #endif /* DEBUG */
2781 
2782 static int buffered_write(struct file *, struct uio *, struct ucred *,
2783 	int, struct thread *);
2784 
2785 static int
2786 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2787 {
2788 	struct thread *td = curthread;
2789 	struct fsck_cmd cmd;
2790 	struct ufsmount *ump;
2791 	struct vnode *vp, *dvp, *fdvp;
2792 	struct inode *ip, *dp;
2793 	struct mount *mp;
2794 	struct fs *fs;
2795 	ufs2_daddr_t blkno;
2796 	long blkcnt, blksize;
2797 	struct file *fp, *vfp;
2798 	cap_rights_t rights;
2799 	int filetype, error;
2800 	static struct fileops *origops, bufferedops;
2801 
2802 	if (req->newlen > sizeof cmd)
2803 		return (EBADRPC);
2804 	if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2805 		return (error);
2806 	if (cmd.version != FFS_CMD_VERSION)
2807 		return (ERPCMISMATCH);
2808 	if ((error = getvnode(td, cmd.handle,
2809 	    cap_rights_init(&rights, CAP_FSCK), &fp)) != 0)
2810 		return (error);
2811 	vp = fp->f_data;
2812 	if (vp->v_type != VREG && vp->v_type != VDIR) {
2813 		fdrop(fp, td);
2814 		return (EINVAL);
2815 	}
2816 	vn_start_write(vp, &mp, V_WAIT);
2817 	if (mp == NULL ||
2818 	    strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2819 		vn_finished_write(mp);
2820 		fdrop(fp, td);
2821 		return (EINVAL);
2822 	}
2823 	ump = VFSTOUFS(mp);
2824 	if ((mp->mnt_flag & MNT_RDONLY) &&
2825 	    ump->um_fsckpid != td->td_proc->p_pid) {
2826 		vn_finished_write(mp);
2827 		fdrop(fp, td);
2828 		return (EROFS);
2829 	}
2830 	fs = ump->um_fs;
2831 	filetype = IFREG;
2832 
2833 	switch (oidp->oid_number) {
2834 
2835 	case FFS_SET_FLAGS:
2836 #ifdef DEBUG
2837 		if (fsckcmds)
2838 			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2839 			    cmd.size > 0 ? "set" : "clear");
2840 #endif /* DEBUG */
2841 		if (cmd.size > 0)
2842 			fs->fs_flags |= (long)cmd.value;
2843 		else
2844 			fs->fs_flags &= ~(long)cmd.value;
2845 		break;
2846 
2847 	case FFS_ADJ_REFCNT:
2848 #ifdef DEBUG
2849 		if (fsckcmds) {
2850 			printf("%s: adjust inode %jd link count by %jd\n",
2851 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2852 			    (intmax_t)cmd.size);
2853 		}
2854 #endif /* DEBUG */
2855 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2856 			break;
2857 		ip = VTOI(vp);
2858 		ip->i_nlink += cmd.size;
2859 		DIP_SET(ip, i_nlink, ip->i_nlink);
2860 		ip->i_effnlink += cmd.size;
2861 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2862 		error = ffs_update(vp, 1);
2863 		if (DOINGSOFTDEP(vp))
2864 			softdep_change_linkcnt(ip);
2865 		vput(vp);
2866 		break;
2867 
2868 	case FFS_ADJ_BLKCNT:
2869 #ifdef DEBUG
2870 		if (fsckcmds) {
2871 			printf("%s: adjust inode %jd block count by %jd\n",
2872 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2873 			    (intmax_t)cmd.size);
2874 		}
2875 #endif /* DEBUG */
2876 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2877 			break;
2878 		ip = VTOI(vp);
2879 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2880 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2881 		error = ffs_update(vp, 1);
2882 		vput(vp);
2883 		break;
2884 
2885 	case FFS_DIR_FREE:
2886 		filetype = IFDIR;
2887 		/* fall through */
2888 
2889 	case FFS_FILE_FREE:
2890 #ifdef DEBUG
2891 		if (fsckcmds) {
2892 			if (cmd.size == 1)
2893 				printf("%s: free %s inode %ju\n",
2894 				    mp->mnt_stat.f_mntonname,
2895 				    filetype == IFDIR ? "directory" : "file",
2896 				    (uintmax_t)cmd.value);
2897 			else
2898 				printf("%s: free %s inodes %ju-%ju\n",
2899 				    mp->mnt_stat.f_mntonname,
2900 				    filetype == IFDIR ? "directory" : "file",
2901 				    (uintmax_t)cmd.value,
2902 				    (uintmax_t)(cmd.value + cmd.size - 1));
2903 		}
2904 #endif /* DEBUG */
2905 		while (cmd.size > 0) {
2906 			if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2907 			    cmd.value, filetype, NULL)))
2908 				break;
2909 			cmd.size -= 1;
2910 			cmd.value += 1;
2911 		}
2912 		break;
2913 
2914 	case FFS_BLK_FREE:
2915 #ifdef DEBUG
2916 		if (fsckcmds) {
2917 			if (cmd.size == 1)
2918 				printf("%s: free block %jd\n",
2919 				    mp->mnt_stat.f_mntonname,
2920 				    (intmax_t)cmd.value);
2921 			else
2922 				printf("%s: free blocks %jd-%jd\n",
2923 				    mp->mnt_stat.f_mntonname,
2924 				    (intmax_t)cmd.value,
2925 				    (intmax_t)cmd.value + cmd.size - 1);
2926 		}
2927 #endif /* DEBUG */
2928 		blkno = cmd.value;
2929 		blkcnt = cmd.size;
2930 		blksize = fs->fs_frag - (blkno % fs->fs_frag);
2931 		while (blkcnt > 0) {
2932 			if (blksize > blkcnt)
2933 				blksize = blkcnt;
2934 			ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2935 			    blksize * fs->fs_fsize, UFS_ROOTINO, VDIR, NULL);
2936 			blkno += blksize;
2937 			blkcnt -= blksize;
2938 			blksize = fs->fs_frag;
2939 		}
2940 		break;
2941 
2942 	/*
2943 	 * Adjust superblock summaries.  fsck(8) is expected to
2944 	 * submit deltas when necessary.
2945 	 */
2946 	case FFS_ADJ_NDIR:
2947 #ifdef DEBUG
2948 		if (fsckcmds) {
2949 			printf("%s: adjust number of directories by %jd\n",
2950 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2951 		}
2952 #endif /* DEBUG */
2953 		fs->fs_cstotal.cs_ndir += cmd.value;
2954 		break;
2955 
2956 	case FFS_ADJ_NBFREE:
2957 #ifdef DEBUG
2958 		if (fsckcmds) {
2959 			printf("%s: adjust number of free blocks by %+jd\n",
2960 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2961 		}
2962 #endif /* DEBUG */
2963 		fs->fs_cstotal.cs_nbfree += cmd.value;
2964 		break;
2965 
2966 	case FFS_ADJ_NIFREE:
2967 #ifdef DEBUG
2968 		if (fsckcmds) {
2969 			printf("%s: adjust number of free inodes by %+jd\n",
2970 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2971 		}
2972 #endif /* DEBUG */
2973 		fs->fs_cstotal.cs_nifree += cmd.value;
2974 		break;
2975 
2976 	case FFS_ADJ_NFFREE:
2977 #ifdef DEBUG
2978 		if (fsckcmds) {
2979 			printf("%s: adjust number of free frags by %+jd\n",
2980 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2981 		}
2982 #endif /* DEBUG */
2983 		fs->fs_cstotal.cs_nffree += cmd.value;
2984 		break;
2985 
2986 	case FFS_ADJ_NUMCLUSTERS:
2987 #ifdef DEBUG
2988 		if (fsckcmds) {
2989 			printf("%s: adjust number of free clusters by %+jd\n",
2990 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2991 		}
2992 #endif /* DEBUG */
2993 		fs->fs_cstotal.cs_numclusters += cmd.value;
2994 		break;
2995 
2996 	case FFS_SET_CWD:
2997 #ifdef DEBUG
2998 		if (fsckcmds) {
2999 			printf("%s: set current directory to inode %jd\n",
3000 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3001 		}
3002 #endif /* DEBUG */
3003 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
3004 			break;
3005 		AUDIT_ARG_VNODE1(vp);
3006 		if ((error = change_dir(vp, td)) != 0) {
3007 			vput(vp);
3008 			break;
3009 		}
3010 		VOP_UNLOCK(vp, 0);
3011 		pwd_chdir(td, vp);
3012 		break;
3013 
3014 	case FFS_SET_DOTDOT:
3015 #ifdef DEBUG
3016 		if (fsckcmds) {
3017 			printf("%s: change .. in cwd from %jd to %jd\n",
3018 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3019 			    (intmax_t)cmd.size);
3020 		}
3021 #endif /* DEBUG */
3022 		/*
3023 		 * First we have to get and lock the parent directory
3024 		 * to which ".." points.
3025 		 */
3026 		error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
3027 		if (error)
3028 			break;
3029 		/*
3030 		 * Now we get and lock the child directory containing "..".
3031 		 */
3032 		FILEDESC_SLOCK(td->td_proc->p_fd);
3033 		dvp = td->td_proc->p_fd->fd_cdir;
3034 		FILEDESC_SUNLOCK(td->td_proc->p_fd);
3035 		if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
3036 			vput(fdvp);
3037 			break;
3038 		}
3039 		dp = VTOI(dvp);
3040 		dp->i_offset = 12;	/* XXX mastertemplate.dot_reclen */
3041 		error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3042 		    DT_DIR, 0);
3043 		cache_purge(fdvp);
3044 		cache_purge(dvp);
3045 		vput(dvp);
3046 		vput(fdvp);
3047 		break;
3048 
3049 	case FFS_UNLINK:
3050 #ifdef DEBUG
3051 		if (fsckcmds) {
3052 			char buf[32];
3053 
3054 			if (copyinstr((char *)(intptr_t)cmd.value, buf, 32, NULL))
3055 				strncpy(buf, "Name_too_long", 32);
3056 			printf("%s: unlink %s (inode %jd)\n",
3057 			    mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3058 		}
3059 #endif /* DEBUG */
3060 		/*
3061 		 * kern_unlinkat will do its own start/finish writes and
3062 		 * they do not nest, so drop ours here. Setting mp == NULL
3063 		 * indicates that vn_finished_write is not needed down below.
3064 		 */
3065 		vn_finished_write(mp);
3066 		mp = NULL;
3067 		error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
3068 		    UIO_USERSPACE, (ino_t)cmd.size);
3069 		break;
3070 
3071 	case FFS_SET_INODE:
3072 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3073 			error = EPERM;
3074 			break;
3075 		}
3076 #ifdef DEBUG
3077 		if (fsckcmds) {
3078 			printf("%s: update inode %jd\n",
3079 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3080 		}
3081 #endif /* DEBUG */
3082 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3083 			break;
3084 		AUDIT_ARG_VNODE1(vp);
3085 		ip = VTOI(vp);
3086 		if (I_IS_UFS1(ip))
3087 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3088 			    sizeof(struct ufs1_dinode));
3089 		else
3090 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3091 			    sizeof(struct ufs2_dinode));
3092 		if (error) {
3093 			vput(vp);
3094 			break;
3095 		}
3096 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3097 		error = ffs_update(vp, 1);
3098 		vput(vp);
3099 		break;
3100 
3101 	case FFS_SET_BUFOUTPUT:
3102 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3103 			error = EPERM;
3104 			break;
3105 		}
3106 		if (ITOUMP(VTOI(vp)) != ump) {
3107 			error = EINVAL;
3108 			break;
3109 		}
3110 #ifdef DEBUG
3111 		if (fsckcmds) {
3112 			printf("%s: %s buffered output for descriptor %jd\n",
3113 			    mp->mnt_stat.f_mntonname,
3114 			    cmd.size == 1 ? "enable" : "disable",
3115 			    (intmax_t)cmd.value);
3116 		}
3117 #endif /* DEBUG */
3118 		if ((error = getvnode(td, cmd.value,
3119 		    cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0)
3120 			break;
3121 		if (vfp->f_vnode->v_type != VCHR) {
3122 			fdrop(vfp, td);
3123 			error = EINVAL;
3124 			break;
3125 		}
3126 		if (origops == NULL) {
3127 			origops = vfp->f_ops;
3128 			bcopy((void *)origops, (void *)&bufferedops,
3129 			    sizeof(bufferedops));
3130 			bufferedops.fo_write = buffered_write;
3131 		}
3132 		if (cmd.size == 1)
3133 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3134 			    (uintptr_t)&bufferedops);
3135 		else
3136 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3137 			    (uintptr_t)origops);
3138 		fdrop(vfp, td);
3139 		break;
3140 
3141 	default:
3142 #ifdef DEBUG
3143 		if (fsckcmds) {
3144 			printf("Invalid request %d from fsck\n",
3145 			    oidp->oid_number);
3146 		}
3147 #endif /* DEBUG */
3148 		error = EINVAL;
3149 		break;
3150 
3151 	}
3152 	fdrop(fp, td);
3153 	vn_finished_write(mp);
3154 	return (error);
3155 }
3156 
3157 /*
3158  * Function to switch a descriptor to use the buffer cache to stage
3159  * its I/O. This is needed so that writes to the filesystem device
3160  * will give snapshots a chance to copy modified blocks for which
3161  * they need to retain copies.
3162  */
3163 static int
3164 buffered_write(fp, uio, active_cred, flags, td)
3165 	struct file *fp;
3166 	struct uio *uio;
3167 	struct ucred *active_cred;
3168 	int flags;
3169 	struct thread *td;
3170 {
3171 	struct vnode *devvp, *vp;
3172 	struct inode *ip;
3173 	struct buf *bp;
3174 	struct fs *fs;
3175 	struct filedesc *fdp;
3176 	int error;
3177 	daddr_t lbn;
3178 
3179 	/*
3180 	 * The devvp is associated with the /dev filesystem. To discover
3181 	 * the filesystem with which the device is associated, we depend
3182 	 * on the application setting the current directory to a location
3183 	 * within the filesystem being written. Yes, this is an ugly hack.
3184 	 */
3185 	devvp = fp->f_vnode;
3186 	if (!vn_isdisk(devvp, NULL))
3187 		return (EINVAL);
3188 	fdp = td->td_proc->p_fd;
3189 	FILEDESC_SLOCK(fdp);
3190 	vp = fdp->fd_cdir;
3191 	vref(vp);
3192 	FILEDESC_SUNLOCK(fdp);
3193 	vn_lock(vp, LK_SHARED | LK_RETRY);
3194 	/*
3195 	 * Check that the current directory vnode indeed belongs to
3196 	 * UFS before trying to dereference UFS-specific v_data fields.
3197 	 */
3198 	if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3199 		vput(vp);
3200 		return (EINVAL);
3201 	}
3202 	ip = VTOI(vp);
3203 	if (ITODEVVP(ip) != devvp) {
3204 		vput(vp);
3205 		return (EINVAL);
3206 	}
3207 	fs = ITOFS(ip);
3208 	vput(vp);
3209 	foffset_lock_uio(fp, uio, flags);
3210 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3211 #ifdef DEBUG
3212 	if (fsckcmds) {
3213 		printf("%s: buffered write for block %jd\n",
3214 		    fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3215 	}
3216 #endif /* DEBUG */
3217 	/*
3218 	 * All I/O must be contained within a filesystem block, start on
3219 	 * a fragment boundary, and be a multiple of fragments in length.
3220 	 */
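	/*
	 * For illustration, with fs_bsize = 32768 and fs_fsize = 4096:
	 * a 4K write at offset 28K is accepted, an 8K write at offset
	 * 28K is rejected for crossing the 32K block boundary, and any
	 * write at offset 30K is rejected for not starting on a 4K
	 * fragment boundary.
	 */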
3221 	if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3222 	    fragoff(fs, uio->uio_offset) != 0 ||
3223 	    fragoff(fs, uio->uio_resid) != 0) {
3224 		error = EINVAL;
3225 		goto out;
3226 	}
3227 	lbn = numfrags(fs, uio->uio_offset);
3228 	bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3229 	bp->b_flags |= B_RELBUF;
3230 	if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3231 		brelse(bp);
3232 		goto out;
3233 	}
3234 	error = bwrite(bp);
3235 out:
3236 	VOP_UNLOCK(devvp, 0);
3237 	foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
3238 	return (error);
3239 }
3240