/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				  int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t	ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
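	/*
	 * Unprivileged users may not allocate out of the minfree
	 * reserve; priv_check_cred() returns 0 for those holding
	 * PRIV_VFS_BLOCKRESERVE.
	 */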
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error, gbflags;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	*bpp = 0;
	vp = ITOV(ip);
	fs = ip->i_fs;
	bp = NULL;
	ump = ip->i_ump;
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

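	/*
	 * A buffer whose physical block number still equals its logical
	 * block number has not been mapped yet; point it at the old
	 * disk location.
	 */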
	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * no space available
	 */
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}
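
/*
 * A minimal illustrative sketch (deliberately left uncompiled) of the
 * hysteresis that ffs_realloccg() applies above when flipping fs_optim
 * between FS_OPTSPACE and FS_OPTTIME.  The parameters are local
 * stand-ins for fs_minfree, fs_dsize and fs_cstotal.cs_nffree, and
 * example_next_optim() itself is a hypothetical helper, not kernel API.
 */
#if 0
static int
example_next_optim(int optim, int minfree, off_t dsize, off_t nffree)
{

	if (optim == FS_OPTSPACE) {
		/* Little fragmentation left: begin optimizing for time. */
		if (minfree > 5 && nffree <= dsize * minfree / (2 * 100))
			return (FS_OPTTIME);
	} else if (optim == FS_OPTTIME) {
		/* Fragments within 2% of the reserve: reclaim space. */
		if (nffree >= dsize * (minfree - 2) / 100)
			return (FS_OPTSPACE);
	}
	return (optim);
}
#endif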

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * from the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success in doing the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather the previous block
 * allocation will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
"do not force synchronous writes when blocks are reallocated");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
"enable block reallocation");

static int maxclustersearch = 10;
SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
0, "max number of cylinder groups to search for contiguous blocks");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct ufsmount *ump;

	/*
	 * If the underlying device can do deletes, then skip reallocating
	 * the blocks of this file into contiguous sequences. Devices that
	 * benefit from BIO_DELETE also benefit from not moving the data.
	 * These devices are flash and therefore work less well with this
	 * optimization. Also skip if reallocblks has been disabled globally.
	 */
	ump = VTOI(ap->a_vp)->i_ump;
	if (ump->um_candelete || doreallocblks == 0)
		return (ENOSPC);

	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here.  Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, do not move anything in it. Indirect blocks are
	 * usually initially laid out in a position between the data
	 * blocks. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	ump = pip->i_ump;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
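	/*
	 * If the preferred inode is out of range, restart the search
	 * at cylinder group 0.
	 */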
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track number of dirs created one after another
	 * in the same cg without intervening files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
					(allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %ju, fs = %s\n",
		    ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random() / 2 + 1;
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ufs_prepare_reclaim(*vpp);
	ip->i_flag = 0;
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		(*vpp)->v_op = &ffs_vnodeops2;
	else
		(*vpp)->v_op = &ffs_vnodeops1;
	return (0);
noinodes:
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Compute various limits that are used for the
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
				    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 *
	 * We are trying to find a suitable cylinder group nearby
	 * our preferred cylinder group to place a new directory.
	 * We scan from our preferred cylinder group forward looking
	 * for a cylinder group that meets our criterion. If we get
	 * to the final cylinder group and do not find anything,
	 * we start scanning forwards from the beginning of the
	 * filesystem. While it might seem sensible to start scanning
	 * backwards or even to alternate looking forward and backward,
	 * this approach fails badly when the filesystem is nearly full.
	 * Specifically, we first search all the areas that have no space
	 * and finally try the one preceding that. We repeat this on
	 * every request and in the case of the final block end up
	 * searching the entire filesystem. By jumping to the front
	 * of the filesystem, our future forward searches always look
	 * in new cylinder groups and thus find every possible block after
	 * one pass over the filesystem.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}
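
/*
 * A simplified, uncompiled sketch of the maxcontigdirs calculation in
 * ffs_dirpref() above (the curdirsize and fs_ipg clamps are omitted).
 * With made-up numbers fs_bsize = 16384, fs_avgfilesize = 16384,
 * fs_avgfpdir = 64 and 4096 free blocks in the average cg, a directory
 * is expected to consume 16384 * 64 bytes, so about
 * (4096 * 16384) / (16384 * 64) = 64 directories fit before the group
 * is considered full.  All names are local stand-ins for fs_* fields;
 * example_maxcontigdirs() is a hypothetical helper, not kernel API.
 */
#if 0
static u_int
example_maxcontigdirs(u_int avgbfree, int bsize, int avgfilesize,
    int avgfpdir)
{
	int dirsize;

	dirsize = avgfilesize * avgfpdir;	/* expected bytes per dir */
	if (dirsize <= 0)
		return (1);			/* overflow; be conservative */
	return (min((avgbfree * bsize) / dirsize, 255));
}
#endif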

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks and the next fs_maxbpg blocks. Each additional section
 * contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. The first indirect is allocated immediately following the last
 * direct block and the data blocks for the first indirect immediately
 * follow it.
 *
 * If no blocks have been allocated in any other section, the indirect
 * block(s) are allocated in the same cylinder group as its inode in an
 * area reserved immediately following the inode blocks. The policy for
 * the data blocks is to place them in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block or the previous block is a hole, then the information on
 * the previous allocation is unavailable; here a best guess is made based
 * on the logical block number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * allocate blocks contiguously within the section if possible.
 */
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din1->di_db[NDADDR - 1] != 0)
			pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din1->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}
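
/*
 * An uncompiled sketch of the tail of the placement policy above: a
 * fresh cylinder group is chosen whenever the index into the block map
 * is a multiple of fs_maxbpg or the preceding block is a hole;
 * otherwise layout simply continues contiguously.  The helper name and
 * parameters are hypothetical stand-ins for the fs_* fields used above.
 */
#if 0
static ufs2_daddr_t
example_blkpref_tail(ufs2_daddr_t *bap, int indx, int maxbpg, int frag)
{

	if (indx % maxbpg == 0 || bap[indx - 1] == 0)
		return (0);		/* caller picks a new cylinder group */
	return (bap[indx - 1] + frag);	/* continue contiguously */
}
#endif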

/*
 * Same as above, but for UFS2
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held.  Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
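
/*
 * An uncompiled sketch that prints the probe order generated by the
 * three passes above for a preferred group "icg": icg itself, then the
 * quadratic offsets +1, +3, +7, +15, ... and finally a linear sweep
 * beginning at icg + 2.  example_probe_order() is a hypothetical
 * helper, not kernel API.
 */
#if 0
static void
example_probe_order(u_int icg, u_int ncg)
{
	u_int cg, i;

	printf("%u", icg);			/* 1: preferred group */
	for (cg = icg, i = 1; i < ncg; i *= 2) {
		cg += i;			/* 2: quadratic rehash */
		if (cg >= ncg)
			cg -= ncg;
		printf(" %u", cg);
	}
	for (cg = (icg + 2) % ncg, i = 2; i < ncg; i++) {
		printf(" %u", cg);		/* 3: brute force sweep */
		if (++cg == ncg)
			cg = 0;
	}
	printf("\n");
}
#endif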
1566 
1567 /*
1568  * Determine whether a fragment can be extended.
1569  *
1570  * Check to see if the necessary fragments are available, and
1571  * if they are, allocate them.
1572  */
1573 static ufs2_daddr_t
1574 ffs_fragextend(ip, cg, bprev, osize, nsize)
1575 	struct inode *ip;
1576 	u_int cg;
1577 	ufs2_daddr_t bprev;
1578 	int osize, nsize;
1579 {
1580 	struct fs *fs;
1581 	struct cg *cgp;
1582 	struct buf *bp;
1583 	struct ufsmount *ump;
1584 	int nffree;
1585 	long bno;
1586 	int frags, bbase;
1587 	int i, error;
1588 	u_int8_t *blksfree;
1589 
1590 	ump = ip->i_ump;
1591 	fs = ip->i_fs;
1592 	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1593 		return (0);
1594 	frags = numfrags(fs, nsize);
1595 	bbase = fragnum(fs, bprev);
1596 	if (bbase > fragnum(fs, (bprev + frags - 1))) {
1597 		/* cannot extend across a block boundary */
1598 		return (0);
1599 	}
1600 	UFS_UNLOCK(ump);
1601 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1602 		(int)fs->fs_cgsize, NOCRED, &bp);
1603 	if (error)
1604 		goto fail;
1605 	cgp = (struct cg *)bp->b_data;
1606 	if (!cg_chkmagic(cgp))
1607 		goto fail;
1608 	bp->b_xflags |= BX_BKGRDWRITE;
1609 	cgp->cg_old_time = cgp->cg_time = time_second;
1610 	bno = dtogd(fs, bprev);
1611 	blksfree = cg_blksfree(cgp);
1612 	for (i = numfrags(fs, osize); i < frags; i++)
1613 		if (isclr(blksfree, bno + i))
1614 			goto fail;
1615 	/*
1616 	 * The current fragment can be extended:
1617 	 * deduct the count of the free fragment run being extended into,
1618 	 * increase the count of the remaining run (if any), and
1619 	 * allocate the extended piece.
1620 	 */
1621 	for (i = frags; i < fs->fs_frag - bbase; i++)
1622 		if (isclr(blksfree, bno + i))
1623 			break;
1624 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
1625 	if (i != frags)
1626 		cgp->cg_frsum[i - frags]++;
1627 	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1628 		clrbit(blksfree, bno + i);
1629 		cgp->cg_cs.cs_nffree--;
1630 		nffree++;
1631 	}
1632 	UFS_LOCK(ump);
1633 	fs->fs_cstotal.cs_nffree -= nffree;
1634 	fs->fs_cs(fs, cg).cs_nffree -= nffree;
1635 	fs->fs_fmod = 1;
1636 	ACTIVECLEAR(fs, cg);
1637 	UFS_UNLOCK(ump);
1638 	if (DOINGSOFTDEP(ITOV(ip)))
1639 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1640 		    frags, numfrags(fs, osize));
1641 	bdwrite(bp);
1642 	return (bprev);
1643 
1644 fail:
1645 	brelse(bp);
1646 	UFS_LOCK(ump);
1647 	return (0);
1648 
1649 }
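
/*
 * The heart of ffs_fragextend() is two checks: the grown allocation
 * must not cross a block boundary, and every additional fragment must
 * still be free.  A standalone sketch of just those checks follows
 * (excluded from the build); freemap[] is a byte-per-fragment stand-in
 * for the cg_blksfree() bitmap and FRAG for fs_frag.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define FRAG 8

static bool
can_extend(const bool freemap[], int bno, int ofrags, int nfrags)
{
	int i;

	if (bno % FRAG + nfrags > FRAG)		/* would cross a block */
		return (false);
	for (i = ofrags; i < nfrags; i++)	/* new frags must be free */
		if (!freemap[bno + i])
			return (false);
	return (true);
}

int
main(void)
{
	/* one block of 8 fragments: 0-1 and 6-7 in use, 2-5 free */
	bool freemap[FRAG] = { false, false, true, true, true, true,
	    false, false };

	printf("%d\n", can_extend(freemap, 0, 2, 4));	/* 1: frags 2-3 free */
	printf("%d\n", can_extend(freemap, 0, 2, 8));	/* 0: frag 6 in use */
	return (0);
}
#endif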
1650 
1651 /*
1652  * Determine whether a block can be allocated.
1653  *
1654  * Check to see if a block of the appropriate size is available,
1655  * and if it is, allocate it.
1656  */
1657 static ufs2_daddr_t
1658 ffs_alloccg(ip, cg, bpref, size, rsize)
1659 	struct inode *ip;
1660 	u_int cg;
1661 	ufs2_daddr_t bpref;
1662 	int size;
1663 	int rsize;
1664 {
1665 	struct fs *fs;
1666 	struct cg *cgp;
1667 	struct buf *bp;
1668 	struct ufsmount *ump;
1669 	ufs1_daddr_t bno;
1670 	ufs2_daddr_t blkno;
1671 	int i, allocsiz, error, frags;
1672 	u_int8_t *blksfree;
1673 
1674 	ump = ip->i_ump;
1675 	fs = ip->i_fs;
1676 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1677 		return (0);
1678 	UFS_UNLOCK(ump);
1679 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1680 		(int)fs->fs_cgsize, NOCRED, &bp);
1681 	if (error)
1682 		goto fail;
1683 	cgp = (struct cg *)bp->b_data;
1684 	if (!cg_chkmagic(cgp) ||
1685 	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1686 		goto fail;
1687 	bp->b_xflags |= BX_BKGRDWRITE;
1688 	cgp->cg_old_time = cgp->cg_time = time_second;
1689 	if (size == fs->fs_bsize) {
1690 		UFS_LOCK(ump);
1691 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1692 		ACTIVECLEAR(fs, cg);
1693 		UFS_UNLOCK(ump);
1694 		bdwrite(bp);
1695 		return (blkno);
1696 	}
1697 	/*
1698 	 * Check to see if any fragments are already available.
1699 	 * allocsiz is the size of the free run that will be used;
1700 	 * a larger run is broken down if no exact fit is available.
1701 	 */
1702 	blksfree = cg_blksfree(cgp);
1703 	frags = numfrags(fs, size);
1704 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1705 		if (cgp->cg_frsum[allocsiz] != 0)
1706 			break;
1707 	if (allocsiz == fs->fs_frag) {
1708 		/*
1709 		 * no fragments were available, so a block will be
1710 		 * allocated, and hacked up
1711 		 */
1712 		if (cgp->cg_cs.cs_nbfree == 0)
1713 			goto fail;
1714 		UFS_LOCK(ump);
1715 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1716 		ACTIVECLEAR(fs, cg);
1717 		UFS_UNLOCK(ump);
1718 		bdwrite(bp);
1719 		return (blkno);
1720 	}
1721 	KASSERT(size == rsize,
1722 	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1723 	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1724 	if (bno < 0)
1725 		goto fail;
1726 	for (i = 0; i < frags; i++)
1727 		clrbit(blksfree, bno + i);
1728 	cgp->cg_cs.cs_nffree -= frags;
1729 	cgp->cg_frsum[allocsiz]--;
1730 	if (frags != allocsiz)
1731 		cgp->cg_frsum[allocsiz - frags]++;
1732 	UFS_LOCK(ump);
1733 	fs->fs_cstotal.cs_nffree -= frags;
1734 	fs->fs_cs(fs, cg).cs_nffree -= frags;
1735 	fs->fs_fmod = 1;
1736 	blkno = cgbase(fs, cg) + bno;
1737 	ACTIVECLEAR(fs, cg);
1738 	UFS_UNLOCK(ump);
1739 	if (DOINGSOFTDEP(ITOV(ip)))
1740 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1741 	bdwrite(bp);
1742 	return (blkno);
1743 
1744 fail:
1745 	brelse(bp);
1746 	UFS_LOCK(ump);
1747 	return (0);
1748 }
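
/*
 * When a partial block is requested, the loop above is a best-fit
 * search over cg_frsum[], which counts the free runs of each length.
 * The standalone sketch below (excluded from the build; FRAG and the
 * frsum[] contents are hypothetical) shows the same selection and the
 * bookkeeping for the leftover piece of an oversized run.
 */
#if 0
#include <stdio.h>

#define FRAG 8

int
main(void)
{
	int frsum[FRAG] = { 0, 0, 0, 4, 0, 1, 0, 0 };	/* runs by length */
	int frags = 2, allocsiz;

	for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
		if (frsum[allocsiz] != 0)		/* smallest fit */
			break;
	if (allocsiz == FRAG) {
		printf("no runs; a full block must be broken up\n");
		return (0);
	}
	frsum[allocsiz]--;			/* consume the chosen run */
	if (frags != allocsiz)
		frsum[allocsiz - frags]++;	/* leftover becomes a run */
	printf("used a %d-frag run for %d frags; a %d-frag run remains\n",
	    allocsiz, frags, allocsiz - frags);
	return (0);
}
#endif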
1749 
1750 /*
1751  * Allocate a block in a cylinder group.
1752  *
1753  * This algorithm implements the following policy:
1754  *   1) allocate the requested block.
1755  *   2) allocate a rotationally optimal block in the same cylinder.
1756  *   3) allocate the next available block on the block rotor for the
1757  *      specified cylinder group.
1758  * Note that this routine only allocates fs_bsize blocks; these
1759  * blocks may be fragmented by the routine that allocates them.
1760  */
1761 static ufs2_daddr_t
1762 ffs_alloccgblk(ip, bp, bpref, size)
1763 	struct inode *ip;
1764 	struct buf *bp;
1765 	ufs2_daddr_t bpref;
1766 	int size;
1767 {
1768 	struct fs *fs;
1769 	struct cg *cgp;
1770 	struct ufsmount *ump;
1771 	ufs1_daddr_t bno;
1772 	ufs2_daddr_t blkno;
1773 	u_int8_t *blksfree;
1774 	int i, cgbpref;
1775 
1776 	fs = ip->i_fs;
1777 	ump = ip->i_ump;
1778 	mtx_assert(UFS_MTX(ump), MA_OWNED);
1779 	cgp = (struct cg *)bp->b_data;
1780 	blksfree = cg_blksfree(cgp);
1781 	if (bpref == 0) {
1782 		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1783 	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1784 		/* map bpref to correct zone in this cg */
1785 		if (bpref < cgdata(fs, cgbpref))
1786 			bpref = cgmeta(fs, cgp->cg_cgx);
1787 		else
1788 			bpref = cgdata(fs, cgp->cg_cgx);
1789 	}
1790 	/*
1791 	 * if the requested block is available, use it
1792 	 */
1793 	bno = dtogd(fs, blknum(fs, bpref));
1794 	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1795 		goto gotit;
1796 	/*
1797 	 * Take the next available block in this cylinder group.
1798 	 */
1799 	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1800 	if (bno < 0)
1801 		return (0);
1802 	/* Update cg_rotor only if allocated from the data zone */
1803 	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1804 		cgp->cg_rotor = bno;
1805 gotit:
1806 	blkno = fragstoblks(fs, bno);
1807 	ffs_clrblock(fs, blksfree, (long)blkno);
1808 	ffs_clusteracct(fs, cgp, blkno, -1);
1809 	cgp->cg_cs.cs_nbfree--;
1810 	fs->fs_cstotal.cs_nbfree--;
1811 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1812 	fs->fs_fmod = 1;
1813 	blkno = cgbase(fs, cgp->cg_cgx) + bno;
1814 	/*
1815 	 * If the caller didn't want the whole block, free the trailing frags here.
1816 	 */
1817 	size = numfrags(fs, size);
1818 	if (size != fs->fs_frag) {
1819 		bno = dtogd(fs, blkno);
1820 		for (i = size; i < fs->fs_frag; i++)
1821 			setbit(blksfree, bno + i);
1822 		i = fs->fs_frag - size;
1823 		cgp->cg_cs.cs_nffree += i;
1824 		fs->fs_cstotal.cs_nffree += i;
1825 		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1826 		fs->fs_fmod = 1;
1827 		cgp->cg_frsum[i]++;
1828 	}
1829 	/* XXX Fixme. */
1830 	UFS_UNLOCK(ump);
1831 	if (DOINGSOFTDEP(ITOV(ip)))
1832 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
1833 		    size, 0);
1834 	UFS_LOCK(ump);
1835 	return (blkno);
1836 }
1837 
1838 /*
1839  * Determine whether a cluster can be allocated.
1840  *
1841  * We do not currently check for optimal rotational layout if there
1842  * are multiple choices in the same cylinder group. Instead we just
1843  * take the first one that we find following bpref.
1844  */
1845 static ufs2_daddr_t
1846 ffs_clusteralloc(ip, cg, bpref, len)
1847 	struct inode *ip;
1848 	u_int cg;
1849 	ufs2_daddr_t bpref;
1850 	int len;
1851 {
1852 	struct fs *fs;
1853 	struct cg *cgp;
1854 	struct buf *bp;
1855 	struct ufsmount *ump;
1856 	int i, run, bit, map, got;
1857 	ufs2_daddr_t bno;
1858 	u_char *mapp;
1859 	int32_t *lp;
1860 	u_int8_t *blksfree;
1861 
1862 	fs = ip->i_fs;
1863 	ump = ip->i_ump;
1864 	if (fs->fs_maxcluster[cg] < len)
1865 		return (0);
1866 	UFS_UNLOCK(ump);
1867 	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1868 	    NOCRED, &bp))
1869 		goto fail_lock;
1870 	cgp = (struct cg *)bp->b_data;
1871 	if (!cg_chkmagic(cgp))
1872 		goto fail_lock;
1873 	bp->b_xflags |= BX_BKGRDWRITE;
1874 	/*
1875 	 * Check to see if a cluster of the needed size (or bigger) is
1876 	 * available in this cylinder group.
1877 	 */
1878 	lp = &cg_clustersum(cgp)[len];
1879 	for (i = len; i <= fs->fs_contigsumsize; i++)
1880 		if (*lp++ > 0)
1881 			break;
1882 	if (i > fs->fs_contigsumsize) {
1883 		/*
1884 		 * This is the first time looking for a cluster in this
1885 		 * cylinder group. Update the cluster summary information
1886 		 * to reflect the true maximum sized cluster so that
1887 		 * future cluster allocation requests can avoid reading
1888 		 * the cylinder group map only to find no clusters.
1889 		 */
1890 		lp = &cg_clustersum(cgp)[len - 1];
1891 		for (i = len - 1; i > 0; i--)
1892 			if (*lp-- > 0)
1893 				break;
1894 		UFS_LOCK(ump);
1895 		fs->fs_maxcluster[cg] = i;
1896 		goto fail;
1897 	}
1898 	/*
1899 	 * Search the cluster map to find a big enough cluster.
1900 	 * We take the first one that we find, even if it is larger
1901 	 * than we need as we prefer to get one close to the previous
1902 	 * block allocation. We do not search before the current
1903 	 * preference point as we do not want to allocate a block
1904 	 * that is allocated before the previous one (as we will
1905 	 * then have to wait for another pass of the elevator
1906 	 * algorithm before it will be read). We prefer to fail and
1907 	 * be recalled to try an allocation in the next cylinder group.
1908 	 */
1909 	if (dtog(fs, bpref) != cg)
1910 		bpref = cgdata(fs, cg);
1911 	else
1912 		bpref = blknum(fs, bpref);
1913 	bpref = fragstoblks(fs, dtogd(fs, bpref));
1914 	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1915 	map = *mapp++;
1916 	bit = 1 << (bpref % NBBY);
1917 	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1918 		if ((map & bit) == 0) {
1919 			run = 0;
1920 		} else {
1921 			run++;
1922 			if (run == len)
1923 				break;
1924 		}
1925 		if ((got & (NBBY - 1)) != (NBBY - 1)) {
1926 			bit <<= 1;
1927 		} else {
1928 			map = *mapp++;
1929 			bit = 1;
1930 		}
1931 	}
1932 	if (got >= cgp->cg_nclusterblks)
1933 		goto fail_lock;
1934 	/*
1935 	 * Allocate the cluster that we have found.
1936 	 */
1937 	blksfree = cg_blksfree(cgp);
1938 	for (i = 1; i <= len; i++)
1939 		if (!ffs_isblock(fs, blksfree, got - run + i))
1940 			panic("ffs_clusteralloc: map mismatch");
1941 	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1942 	if (dtog(fs, bno) != cg)
1943 		panic("ffs_clusteralloc: allocated out of group");
1944 	len = blkstofrags(fs, len);
1945 	UFS_LOCK(ump);
1946 	for (i = 0; i < len; i += fs->fs_frag)
1947 		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1948 			panic("ffs_clusteralloc: lost block");
1949 	ACTIVECLEAR(fs, cg);
1950 	UFS_UNLOCK(ump);
1951 	bdwrite(bp);
1952 	return (bno);
1953 
1954 fail_lock:
1955 	UFS_LOCK(ump);
1956 fail:
1957 	brelse(bp);
1958 	return (0);
1959 }
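
/*
 * The cluster search above walks a packed bitmap one bit at a time
 * looking for a run of 'len' free blocks at or after the preference
 * point.  The standalone sketch below (excluded from the build) does
 * the same walk over a small hypothetical map without the byte-wise
 * map/bit bookkeeping.
 */
#if 0
#include <stdio.h>

#define NBBY	8		/* bits per byte */

static int
find_run(const unsigned char *map, int nbits, int pref, int len)
{
	int run = 0, got;

	for (got = pref; got < nbits; got++) {
		if ((map[got / NBBY] & (1 << (got % NBBY))) == 0)
			run = 0;			/* block in use */
		else if (++run == len)
			return (got - run + 1);		/* start of run */
	}
	return (-1);		/* caller fails over to another group */
}

int
main(void)
{
	/* 16 blocks; blocks 5-9 and 12-13 are free (bits set) */
	unsigned char map[2] = { 0xe0, 0x33 };

	printf("%d\n", find_run(map, 16, 2, 3));	/* prints 5 */
	printf("%d\n", find_run(map, 16, 10, 3));	/* prints -1 */
	return (0);
}
#endif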
1960 
1961 static inline struct buf *
1962 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1963 {
1964 	struct fs *fs;
1965 
1966 	fs = ip->i_fs;
1967 	return (getblk(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs,
1968 	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
1969 	    gbflags));
1970 }
1971 
1972 /*
1973  * Determine whether an inode can be allocated.
1974  *
1975  * Check to see if an inode is available, and if it is,
1976  * allocate it using the following policy:
1977  *   1) allocate the requested inode.
1978  *   2) allocate the next available inode after the requested
1979  *      inode in the specified cylinder group.
1980  */
1981 static ufs2_daddr_t
1982 ffs_nodealloccg(ip, cg, ipref, mode, unused)
1983 	struct inode *ip;
1984 	u_int cg;
1985 	ufs2_daddr_t ipref;
1986 	int mode;
1987 	int unused;
1988 {
1989 	struct fs *fs;
1990 	struct cg *cgp;
1991 	struct buf *bp, *ibp;
1992 	struct ufsmount *ump;
1993 	u_int8_t *inosused, *loc;
1994 	struct ufs2_dinode *dp2;
1995 	int error, start, len, i;
1996 	u_int32_t old_initediblk;
1997 
1998 	fs = ip->i_fs;
1999 	ump = ip->i_ump;
2000 check_nifree:
2001 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
2002 		return (0);
2003 	UFS_UNLOCK(ump);
2004 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
2005 		(int)fs->fs_cgsize, NOCRED, &bp);
2006 	if (error) {
2007 		brelse(bp);
2008 		UFS_LOCK(ump);
2009 		return (0);
2010 	}
2011 	cgp = (struct cg *)bp->b_data;
2012 restart:
2013 	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
2014 		brelse(bp);
2015 		UFS_LOCK(ump);
2016 		return (0);
2017 	}
2018 	bp->b_xflags |= BX_BKGRDWRITE;
2019 	inosused = cg_inosused(cgp);
2020 	if (ipref) {
2021 		ipref %= fs->fs_ipg;
2022 		if (isclr(inosused, ipref))
2023 			goto gotit;
2024 	}
2025 	start = cgp->cg_irotor / NBBY;
2026 	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2027 	loc = memcchr(&inosused[start], 0xff, len);
2028 	if (loc == NULL) {
2029 		len = start + 1;
2030 		start = 0;
2031 		loc = memcchr(&inosused[start], 0xff, len);
2032 		if (loc == NULL) {
2033 			printf("cg = %d, irotor = %ld, fs = %s\n",
2034 			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2035 			panic("ffs_nodealloccg: map corrupted");
2036 			/* NOTREACHED */
2037 		}
2038 	}
2039 	ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2040 gotit:
2041 	/*
2042 	 * Check to see if we need to initialize more inodes.
2043 	 */
2044 	if (fs->fs_magic == FS_UFS2_MAGIC &&
2045 	    ipref + INOPB(fs) > cgp->cg_initediblk &&
2046 	    cgp->cg_initediblk < cgp->cg_niblk) {
2047 		old_initediblk = cgp->cg_initediblk;
2048 
2049 		/*
2050 		 * Free the cylinder group lock before writing the
2051 		 * initialized inode block.  Entering the
2052 		 * babarrierwrite() with the cylinder group lock
2053 		 * causes lock order violation between the lock and
2054 		 * snaplk.
2055 		 *
2056 		 * Another thread can decide to initialize the same
2057 		 * inode block, but whichever thread first gets the
2058 		 * cylinder group lock after writing the newly
2059 		 * allocated inode block will update it and the other
2060 		 * will realize that it has lost and leave the
2061 		 * cylinder group unchanged.
2062 		 */
2063 		ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2064 		brelse(bp);
2065 		if (ibp == NULL) {
2066 			/*
2067 			 * The inode block buffer is already owned by
2068 			 * another thread, which must initialize it.
2069 			 * Wait on the buffer to allow another thread
2070 			 * to finish the updates, with dropped cg
2071 			 * buffer lock, then retry.
2072 			 */
2073 			ibp = getinobuf(ip, cg, old_initediblk, 0);
2074 			brelse(ibp);
2075 			UFS_LOCK(ump);
2076 			goto check_nifree;
2077 		}
2078 		bzero(ibp->b_data, (int)fs->fs_bsize);
2079 		dp2 = (struct ufs2_dinode *)(ibp->b_data);
2080 		for (i = 0; i < INOPB(fs); i++) {
2081 			dp2->di_gen = arc4random() / 2 + 1;
2082 			dp2++;
2083 		}
2084 		/*
2085 		 * Rather than adding a soft updates dependency to ensure
2086 		 * that the new inode block is written before it is claimed
2087 		 * by the cylinder group map, we just do a barrier write
2088 		 * here. The barrier write will ensure that the inode block
2089 		 * gets written before the updated cylinder group map can be
2090 		 * written. The barrier write should only slow down bulk
2091 		 * loading of newly created filesystems.
2092 		 */
2093 		babarrierwrite(ibp);
2094 
2095 		/*
2096 		 * After the inode block is written, try to update the
2097 		 * cg initediblk pointer.  If another thread beat us
2098 		 * to it, then leave it unchanged as the other thread
2099 		 * has already set it correctly.
2100 		 */
2101 		error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
2102 		    (int)fs->fs_cgsize, NOCRED, &bp);
2103 		UFS_LOCK(ump);
2104 		ACTIVECLEAR(fs, cg);
2105 		UFS_UNLOCK(ump);
2106 		if (error != 0) {
2107 			brelse(bp);
2108 			return (error);
2109 		}
2110 		cgp = (struct cg *)bp->b_data;
2111 		if (cgp->cg_initediblk == old_initediblk)
2112 			cgp->cg_initediblk += INOPB(fs);
2113 		goto restart;
2114 	}
2115 	cgp->cg_old_time = cgp->cg_time = time_second;
2116 	cgp->cg_irotor = ipref;
2117 	UFS_LOCK(ump);
2118 	ACTIVECLEAR(fs, cg);
2119 	setbit(inosused, ipref);
2120 	cgp->cg_cs.cs_nifree--;
2121 	fs->fs_cstotal.cs_nifree--;
2122 	fs->fs_cs(fs, cg).cs_nifree--;
2123 	fs->fs_fmod = 1;
2124 	if ((mode & IFMT) == IFDIR) {
2125 		cgp->cg_cs.cs_ndir++;
2126 		fs->fs_cstotal.cs_ndir++;
2127 		fs->fs_cs(fs, cg).cs_ndir++;
2128 	}
2129 	UFS_UNLOCK(ump);
2130 	if (DOINGSOFTDEP(ITOV(ip)))
2131 		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2132 	bdwrite(bp);
2133 	return ((ino_t)(cg * fs->fs_ipg + ipref));
2134 }
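
/*
 * The inode scan above relies on memcchr() to skip fully allocated
 * 0xff bytes of the inosused map, then picks the lowest clear bit of
 * the first partially free byte with ffs().  A standalone sketch of
 * that calculation follows (excluded from the build, with a
 * hypothetical map).
 */
#if 0
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define NBBY	8

int
main(void)
{
	unsigned char inosused[4] = { 0xff, 0xff, 0xf7, 0x00 };
	unsigned int i, ipref = 0;

	for (i = 0; i < sizeof(inosused); i++)
		if (inosused[i] != 0xff) {	/* what memcchr() finds */
			ipref = i * NBBY + ffs(~inosused[i] & 0xff) - 1;
			break;
		}
	/* byte 2 is 0xf7, whose clear bit is bit 3, so inode index 19 */
	printf("first free inode index: %u\n", ipref);
	return (0);
}
#endif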
2135 
2136 /*
2137  * Free a block or fragment.
2138  *
2139  * The specified block or fragment is placed back in the
2140  * free map. If a fragment is deallocated, a possible
2141  * block reassembly is checked.
2142  */
2143 static void
2144 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2145 	struct ufsmount *ump;
2146 	struct fs *fs;
2147 	struct vnode *devvp;
2148 	ufs2_daddr_t bno;
2149 	long size;
2150 	ino_t inum;
2151 	struct workhead *dephd;
2152 {
2153 	struct mount *mp;
2154 	struct cg *cgp;
2155 	struct buf *bp;
2156 	ufs1_daddr_t fragno, cgbno;
2157 	ufs2_daddr_t cgblkno;
2158 	int i, blk, frags, bbase;
2159 	u_int cg;
2160 	u_int8_t *blksfree;
2161 	struct cdev *dev;
2162 
2163 	cg = dtog(fs, bno);
2164 	if (devvp->v_type == VREG) {
2165 		/* devvp is a snapshot */
2166 		dev = VTOI(devvp)->i_devvp->v_rdev;
2167 		cgblkno = fragstoblks(fs, cgtod(fs, cg));
2168 	} else {
2169 		/* devvp is a normal disk device */
2170 		dev = devvp->v_rdev;
2171 		cgblkno = fsbtodb(fs, cgtod(fs, cg));
2172 		ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2173 	}
2174 #ifdef INVARIANTS
2175 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2176 	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2177 		printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2178 		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2179 		    size, fs->fs_fsmnt);
2180 		panic("ffs_blkfree_cg: bad size");
2181 	}
2182 #endif
2183 	if ((u_int)bno >= fs->fs_size) {
2184 		printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2185 		    (u_long)inum);
2186 		ffs_fserr(fs, inum, "bad block");
2187 		return;
2188 	}
2189 	if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2190 		brelse(bp);
2191 		return;
2192 	}
2193 	cgp = (struct cg *)bp->b_data;
2194 	if (!cg_chkmagic(cgp)) {
2195 		brelse(bp);
2196 		return;
2197 	}
2198 	bp->b_xflags |= BX_BKGRDWRITE;
2199 	cgp->cg_old_time = cgp->cg_time = time_second;
2200 	cgbno = dtogd(fs, bno);
2201 	blksfree = cg_blksfree(cgp);
2202 	UFS_LOCK(ump);
2203 	if (size == fs->fs_bsize) {
2204 		fragno = fragstoblks(fs, cgbno);
2205 		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2206 			if (devvp->v_type == VREG) {
2207 				UFS_UNLOCK(ump);
2208 				/* devvp is a snapshot */
2209 				brelse(bp);
2210 				return;
2211 			}
2212 			printf("dev = %s, block = %jd, fs = %s\n",
2213 			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2214 			panic("ffs_blkfree_cg: freeing free block");
2215 		}
2216 		ffs_setblock(fs, blksfree, fragno);
2217 		ffs_clusteracct(fs, cgp, fragno, 1);
2218 		cgp->cg_cs.cs_nbfree++;
2219 		fs->fs_cstotal.cs_nbfree++;
2220 		fs->fs_cs(fs, cg).cs_nbfree++;
2221 	} else {
2222 		bbase = cgbno - fragnum(fs, cgbno);
2223 		/*
2224 		 * decrement the counts associated with the old frags
2225 		 */
2226 		blk = blkmap(fs, blksfree, bbase);
2227 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2228 		/*
2229 		 * deallocate the fragment
2230 		 */
2231 		frags = numfrags(fs, size);
2232 		for (i = 0; i < frags; i++) {
2233 			if (isset(blksfree, cgbno + i)) {
2234 				printf("dev = %s, block = %jd, fs = %s\n",
2235 				    devtoname(dev), (intmax_t)(bno + i),
2236 				    fs->fs_fsmnt);
2237 				panic("ffs_blkfree_cg: freeing free frag");
2238 			}
2239 			setbit(blksfree, cgbno + i);
2240 		}
2241 		cgp->cg_cs.cs_nffree += i;
2242 		fs->fs_cstotal.cs_nffree += i;
2243 		fs->fs_cs(fs, cg).cs_nffree += i;
2244 		/*
2245 		 * add back in counts associated with the new frags
2246 		 */
2247 		blk = blkmap(fs, blksfree, bbase);
2248 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2249 		/*
2250 		 * if a complete block has been reassembled, account for it
2251 		 */
2252 		fragno = fragstoblks(fs, bbase);
2253 		if (ffs_isblock(fs, blksfree, fragno)) {
2254 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
2255 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2256 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2257 			ffs_clusteracct(fs, cgp, fragno, 1);
2258 			cgp->cg_cs.cs_nbfree++;
2259 			fs->fs_cstotal.cs_nbfree++;
2260 			fs->fs_cs(fs, cg).cs_nbfree++;
2261 		}
2262 	}
2263 	fs->fs_fmod = 1;
2264 	ACTIVECLEAR(fs, cg);
2265 	UFS_UNLOCK(ump);
2266 	mp = UFSTOVFS(ump);
2267 	if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG)
2268 		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2269 		    numfrags(fs, size), dephd);
2270 	bdwrite(bp);
2271 }
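
/*
 * The fragment branch above frees the bits and then checks whether the
 * enclosing block has been reassembled, in which case the fragment
 * credits are folded back into a single whole-block credit.  The
 * standalone sketch below (excluded from the build) shows just that
 * accounting for one hypothetical block.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define FRAG 8

int
main(void)
{
	bool freemap[FRAG] = { true, true, true, true, true,
	    false, false, false };
	int nffree = 5, nbfree = 0, i;

	for (i = 5; i < FRAG; i++) {	/* free the last three fragments */
		freemap[i] = true;
		nffree++;
	}
	for (i = 0; i < FRAG; i++)	/* fully reassembled? */
		if (!freemap[i])
			break;
	if (i == FRAG) {
		nffree -= FRAG;		/* no longer counted as fragments */
		nbfree++;		/* counted as one free block */
	}
	printf("nffree %d, nbfree %d\n", nffree, nbfree);	/* 0, 1 */
	return (0);
}
#endif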
2272 
2273 TASKQUEUE_DEFINE_THREAD(ffs_trim);
2274 
2275 struct ffs_blkfree_trim_params {
2276 	struct task task;
2277 	struct ufsmount *ump;
2278 	struct vnode *devvp;
2279 	ufs2_daddr_t bno;
2280 	long size;
2281 	ino_t inum;
2282 	struct workhead *pdephd;
2283 	struct workhead dephd;
2284 };
2285 
2286 static void
2287 ffs_blkfree_trim_task(ctx, pending)
2288 	void *ctx;
2289 	int pending;
2290 {
2291 	struct ffs_blkfree_trim_params *tp;
2292 
2293 	tp = ctx;
2294 	ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2295 	    tp->inum, tp->pdephd);
2296 	vn_finished_secondary_write(UFSTOVFS(tp->ump));
2297 	free(tp, M_TEMP);
2298 }
2299 
2300 static void
2301 ffs_blkfree_trim_completed(bip)
2302 	struct bio *bip;
2303 {
2304 	struct ffs_blkfree_trim_params *tp;
2305 
2306 	tp = bip->bio_caller2;
2307 	g_destroy_bio(bip);
2308 	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2309 	taskqueue_enqueue(taskqueue_ffs_trim, &tp->task);
2310 }
2311 
2312 void
2313 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
2314 	struct ufsmount *ump;
2315 	struct fs *fs;
2316 	struct vnode *devvp;
2317 	ufs2_daddr_t bno;
2318 	long size;
2319 	ino_t inum;
2320 	enum vtype vtype;
2321 	struct workhead *dephd;
2322 {
2323 	struct mount *mp;
2324 	struct bio *bip;
2325 	struct ffs_blkfree_trim_params *tp;
2326 
2327 	/*
2328 	 * Check to see if a snapshot wants to claim the block.
2329 	 * Check that devvp is a normal disk device, not a snapshot,
2330 	 * that it has one or more snapshots associated with it, and
2331 	 * that one of the snapshots wants to claim the block.
2332 	 */
2333 	if (devvp->v_type != VREG &&
2334 	    (devvp->v_vflag & VV_COPYONWRITE) &&
2335 	    ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2336 		return;
2337 	}
2338 	/*
2339 	 * Nothing to delay if TRIM is disabled, or the operation is
2340 	 * performed on the snapshot.
2341 	 */
2342 	if (!ump->um_candelete || devvp->v_type == VREG) {
2343 		ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2344 		return;
2345 	}
2346 
2347 	/*
2348 	 * Postpone the set of the free bit in the cg bitmap until the
2349 	 * BIO_DELETE is completed.  Otherwise, due to disk queue
2350 	 * reordering, TRIM might be issued after we reuse the block
2351 	 * and write some new data into it.
2352 	 */
2353 	tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
2354 	tp->ump = ump;
2355 	tp->devvp = devvp;
2356 	tp->bno = bno;
2357 	tp->size = size;
2358 	tp->inum = inum;
2359 	if (dephd != NULL) {
2360 		LIST_INIT(&tp->dephd);
2361 		LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2362 		tp->pdephd = &tp->dephd;
2363 	} else
2364 		tp->pdephd = NULL;
2365 
2366 	bip = g_alloc_bio();
2367 	bip->bio_cmd = BIO_DELETE;
2368 	bip->bio_offset = dbtob(fsbtodb(fs, bno));
2369 	bip->bio_done = ffs_blkfree_trim_completed;
2370 	bip->bio_length = size;
2371 	bip->bio_caller2 = tp;
2372 
2373 	mp = UFSTOVFS(ump);
2374 	vn_start_secondary_write(NULL, &mp, 0);
2375 	g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
2376 }
2377 
2378 #ifdef INVARIANTS
2379 /*
2380  * Verify allocation of a block or fragment. Returns true if block or
2381  * fragment is allocated, false if it is free.
2382  */
2383 static int
2384 ffs_checkblk(ip, bno, size)
2385 	struct inode *ip;
2386 	ufs2_daddr_t bno;
2387 	long size;
2388 {
2389 	struct fs *fs;
2390 	struct cg *cgp;
2391 	struct buf *bp;
2392 	ufs1_daddr_t cgbno;
2393 	int i, error, frags, free;
2394 	u_int8_t *blksfree;
2395 
2396 	fs = ip->i_fs;
2397 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2398 		printf("bsize = %ld, size = %ld, fs = %s\n",
2399 		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
2400 		panic("ffs_checkblk: bad size");
2401 	}
2402 	if ((u_int)bno >= fs->fs_size)
2403 		panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2404 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
2405 		(int)fs->fs_cgsize, NOCRED, &bp);
2406 	if (error)
2407 		panic("ffs_checkblk: cg bread failed");
2408 	cgp = (struct cg *)bp->b_data;
2409 	if (!cg_chkmagic(cgp))
2410 		panic("ffs_checkblk: cg magic mismatch");
2411 	bp->b_xflags |= BX_BKGRDWRITE;
2412 	blksfree = cg_blksfree(cgp);
2413 	cgbno = dtogd(fs, bno);
2414 	if (size == fs->fs_bsize) {
2415 		free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2416 	} else {
2417 		frags = numfrags(fs, size);
2418 		for (free = 0, i = 0; i < frags; i++)
2419 			if (isset(blksfree, cgbno + i))
2420 				free++;
2421 		if (free != 0 && free != frags)
2422 			panic("ffs_checkblk: partially free fragment");
2423 	}
2424 	brelse(bp);
2425 	return (!free);
2426 }
2427 #endif /* INVARIANTS */
2428 
2429 /*
2430  * Free an inode.
2431  */
2432 int
2433 ffs_vfree(pvp, ino, mode)
2434 	struct vnode *pvp;
2435 	ino_t ino;
2436 	int mode;
2437 {
2438 	struct inode *ip;
2439 
2440 	if (DOINGSOFTDEP(pvp)) {
2441 		softdep_freefile(pvp, ino, mode);
2442 		return (0);
2443 	}
2444 	ip = VTOI(pvp);
2445 	return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode,
2446 	    NULL));
2447 }
2448 
2449 /*
2450  * Do the actual free operation.
2451  * The specified inode is placed back in the free map.
2452  */
2453 int
2454 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2455 	struct ufsmount *ump;
2456 	struct fs *fs;
2457 	struct vnode *devvp;
2458 	ino_t ino;
2459 	int mode;
2460 	struct workhead *wkhd;
2461 {
2462 	struct cg *cgp;
2463 	struct buf *bp;
2464 	ufs2_daddr_t cgbno;
2465 	int error;
2466 	u_int cg;
2467 	u_int8_t *inosused;
2468 	struct cdev *dev;
2469 
2470 	cg = ino_to_cg(fs, ino);
2471 	if (devvp->v_type == VREG) {
2472 		/* devvp is a snapshot */
2473 		dev = VTOI(devvp)->i_devvp->v_rdev;
2474 		cgbno = fragstoblks(fs, cgtod(fs, cg));
2475 	} else {
2476 		/* devvp is a normal disk device */
2477 		dev = devvp->v_rdev;
2478 		cgbno = fsbtodb(fs, cgtod(fs, cg));
2479 	}
2480 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2481 		panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2482 		    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2483 	if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
2484 		brelse(bp);
2485 		return (error);
2486 	}
2487 	cgp = (struct cg *)bp->b_data;
2488 	if (!cg_chkmagic(cgp)) {
2489 		brelse(bp);
2490 		return (0);
2491 	}
2492 	bp->b_xflags |= BX_BKGRDWRITE;
2493 	cgp->cg_old_time = cgp->cg_time = time_second;
2494 	inosused = cg_inosused(cgp);
2495 	ino %= fs->fs_ipg;
2496 	if (isclr(inosused, ino)) {
2497 		printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2498 		    (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
2499 		if (fs->fs_ronly == 0)
2500 			panic("ffs_freefile: freeing free inode");
2501 	}
2502 	clrbit(inosused, ino);
2503 	if (ino < cgp->cg_irotor)
2504 		cgp->cg_irotor = ino;
2505 	cgp->cg_cs.cs_nifree++;
2506 	UFS_LOCK(ump);
2507 	fs->fs_cstotal.cs_nifree++;
2508 	fs->fs_cs(fs, cg).cs_nifree++;
2509 	if ((mode & IFMT) == IFDIR) {
2510 		cgp->cg_cs.cs_ndir--;
2511 		fs->fs_cstotal.cs_ndir--;
2512 		fs->fs_cs(fs, cg).cs_ndir--;
2513 	}
2514 	fs->fs_fmod = 1;
2515 	ACTIVECLEAR(fs, cg);
2516 	UFS_UNLOCK(ump);
2517 	if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type != VREG)
2518 		softdep_setup_inofree(UFSTOVFS(ump), bp,
2519 		    ino + cg * fs->fs_ipg, wkhd);
2520 	bdwrite(bp);
2521 	return (0);
2522 }
2523 
2524 /*
2525  * Check to see if a file is free.
2526  */
2527 int
2528 ffs_checkfreefile(fs, devvp, ino)
2529 	struct fs *fs;
2530 	struct vnode *devvp;
2531 	ino_t ino;
2532 {
2533 	struct cg *cgp;
2534 	struct buf *bp;
2535 	ufs2_daddr_t cgbno;
2536 	int ret;
2537 	u_int cg;
2538 	u_int8_t *inosused;
2539 
2540 	cg = ino_to_cg(fs, ino);
2541 	if (devvp->v_type == VREG) {
2542 		/* devvp is a snapshot */
2543 		cgbno = fragstoblks(fs, cgtod(fs, cg));
2544 	} else {
2545 		/* devvp is a normal disk device */
2546 		cgbno = fsbtodb(fs, cgtod(fs, cg));
2547 	}
2548 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2549 		return (1);
2550 	if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2551 		brelse(bp);
2552 		return (1);
2553 	}
2554 	cgp = (struct cg *)bp->b_data;
2555 	if (!cg_chkmagic(cgp)) {
2556 		brelse(bp);
2557 		return (1);
2558 	}
2559 	inosused = cg_inosused(cgp);
2560 	ino %= fs->fs_ipg;
2561 	ret = isclr(inosused, ino);
2562 	brelse(bp);
2563 	return (ret);
2564 }
2565 
2566 /*
2567  * Find a block of the specified size in the specified cylinder group.
2568  *
2569  * It is a panic if a request is made to find a block when none
2570  * are available.
2571  */
2572 static ufs1_daddr_t
2573 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2574 	struct fs *fs;
2575 	struct cg *cgp;
2576 	ufs2_daddr_t bpref;
2577 	int allocsiz;
2578 {
2579 	ufs1_daddr_t bno;
2580 	int start, len, loc, i;
2581 	int blk, field, subfield, pos;
2582 	u_int8_t *blksfree;
2583 
2584 	/*
2585 	 * find the fragment by searching through the free block
2586 	 * map for an appropriate bit pattern
2587 	 */
2588 	if (bpref)
2589 		start = dtogd(fs, bpref) / NBBY;
2590 	else
2591 		start = cgp->cg_frotor / NBBY;
2592 	blksfree = cg_blksfree(cgp);
2593 	len = howmany(fs->fs_fpg, NBBY) - start;
2594 	loc = scanc((u_int)len, (u_char *)&blksfree[start],
2595 		fragtbl[fs->fs_frag],
2596 		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2597 	if (loc == 0) {
2598 		len = start + 1;
2599 		start = 0;
2600 		loc = scanc((u_int)len, (u_char *)&blksfree[0],
2601 			fragtbl[fs->fs_frag],
2602 			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2603 		if (loc == 0) {
2604 			printf("start = %d, len = %d, fs = %s\n",
2605 			    start, len, fs->fs_fsmnt);
2606 			panic("ffs_alloccg: map corrupted");
2607 			/* NOTREACHED */
2608 		}
2609 	}
2610 	bno = (start + len - loc) * NBBY;
2611 	cgp->cg_frotor = bno;
2612 	/*
2613 	 * found the byte in the map
2614 	 * sift through the bits to find the selected frag
2615 	 */
2616 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2617 		blk = blkmap(fs, blksfree, bno);
2618 		blk <<= 1;
2619 		field = around[allocsiz];
2620 		subfield = inside[allocsiz];
2621 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2622 			if ((blk & field) == subfield)
2623 				return (bno + pos);
2624 			field <<= 1;
2625 			subfield <<= 1;
2626 		}
2627 	}
2628 	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2629 	panic("ffs_alloccg: block not in map");
2630 	return (-1);
2631 }
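
/*
 * ffs_mapsearch() speeds up its scan with scanc() and fragtbl, but the
 * result is equivalent to the simpler search below: walk the map block
 * by block from the rotor and return the first run of 'allocsiz' free
 * fragments that does not cross a block boundary.  The sketch is
 * standalone, excluded from the build, and uses a byte-per-fragment
 * map in place of the real bitmap.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define FRAG	8
#define NFRAGS	32		/* fragments in this tiny group */

static int
mapsearch(const bool freemap[NFRAGS], int rotor, int allocsiz)
{
	int bno, pos, run;

	for (bno = rotor - rotor % FRAG; bno < NFRAGS; bno += FRAG) {
		run = 0;
		for (pos = 0; pos < FRAG; pos++) {
			if (!freemap[bno + pos])
				run = 0;
			else if (++run == allocsiz)
				return (bno + pos - allocsiz + 1);
		}
	}
	return (-1);		/* the real code wraps around, then panics */
}

int
main(void)
{
	bool freemap[NFRAGS] = { false };
	int i;

	for (i = 18; i < 22; i++)	/* fragments 18-21 are free */
		freemap[i] = true;
	printf("%d\n", mapsearch(freemap, 4, 3));	/* prints 18 */
	return (0);
}
#endif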
2632 
2633 /*
2634  * Fserr prints the name of a filesystem with an error diagnostic.
2635  *
2636  * The form of the error message is:
2637  *	fs: error message
2638  */
2639 void
2640 ffs_fserr(fs, inum, cp)
2641 	struct fs *fs;
2642 	ino_t inum;
2643 	char *cp;
2644 {
2645 	struct thread *td = curthread;	/* XXX */
2646 	struct proc *p = td->td_proc;
2647 
2648 	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
2649 	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
2650 	    fs->fs_fsmnt, cp);
2651 }
2652 
2653 /*
2654  * This function provides the capability for the fsck program to
2655  * update an active filesystem. Fourteen operations are provided:
2656  *
2657  * adjrefcnt(inode, amt) - adjusts the reference count on the
2658  *	specified inode by the specified amount. Under normal
2659  *	operation the count should always go down. Decrementing
2660  *	the count to zero will cause the inode to be freed.
2661  * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2662  *	inode by the specified amount.
2663  * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
2664  *	adjust the superblock summary.
2665  * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2666  *	are marked as free. Inodes should never have to be marked
2667  *	as in use.
2668  * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2669  *	are marked as free. Inodes should never have to be marked
2670  *	as in use.
2671  * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2672  *	are marked as free. Blocks should never have to be marked
2673  *	as in use.
2674  * setflags(flags, set/clear) - the fs_flags field has the specified
2675  *	flags set (second parameter +1) or cleared (second parameter -1).
2676  * setcwd(dirinode) - set the current directory to dirinode in the
2677  *	filesystem associated with the snapshot.
2678  * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2679  *	in the current directory is oldvalue then change it to newvalue.
2680  * unlink(nameptr, oldvalue) - Verify that the inode number associated
2681  *	with nameptr in the current directory is oldvalue then unlink it.
2682  *
2683  * The following functions may only be used on a quiescent filesystem
2684  * by the soft updates journal. They are not safe to be run on an active
2685  * filesystem.
2686  *
2687  * setinode(inode, dip) - the specified disk inode is replaced with the
2688  *	contents pointed to by dip.
2689  * setbufoutput(fd, flags) - output associated with the specified file
2690  *	descriptor (which must reference the character device supporting
2691  *	the filesystem) switches from using physio to running through the
2692  *	buffer cache when flags is set to 1. The descriptor reverts to
2693  *	physio for output when flags is set to zero.
2694  */
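
/*
 * These operations are driven from userland by writing a struct
 * fsck_cmd (declared in <ufs/ffs/fs.h>) to the matching vfs.ffs
 * sysctl.  The standalone sketch below (excluded from the build) is a
 * hedged illustration of one such call; the mount point "/mnt" and the
 * inode number are hypothetical, and error handling is minimal.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <ufs/ffs/fs.h>

#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct fsck_cmd cmd = { 0 };

	cmd.version = FFS_CMD_VERSION;
	cmd.handle = open("/mnt", O_RDONLY);	/* any fd within the fs */
	cmd.value = 117;			/* inode to adjust */
	cmd.size = -1;				/* decrement link count */
	if (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL, &cmd,
	    sizeof(cmd)) == -1)
		perror("vfs.ffs.adjrefcnt");
	return (0);
}
#endif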
2695 
2696 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2697 
2698 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2699 	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2700 
2701 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2702 	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2703 
2704 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2705 	sysctl_ffs_fsck, "Adjust number of directories");
2706 
2707 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2708 	sysctl_ffs_fsck, "Adjust number of free blocks");
2709 
2710 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2711 	sysctl_ffs_fsck, "Adjust number of free inodes");
2712 
2713 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2714 	sysctl_ffs_fsck, "Adjust number of free frags");
2715 
2716 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2717 	sysctl_ffs_fsck, "Adjust number of free clusters");
2718 
2719 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2720 	sysctl_ffs_fsck, "Free Range of Directory Inodes");
2721 
2722 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2723 	sysctl_ffs_fsck, "Free Range of File Inodes");
2724 
2725 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2726 	sysctl_ffs_fsck, "Free Range of Blocks");
2727 
2728 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2729 	sysctl_ffs_fsck, "Change Filesystem Flags");
2730 
2731 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2732 	sysctl_ffs_fsck, "Set Current Working Directory");
2733 
2734 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2735 	sysctl_ffs_fsck, "Change Value of .. Entry");
2736 
2737 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2738 	sysctl_ffs_fsck, "Unlink a Duplicate Name");
2739 
2740 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2741 	sysctl_ffs_fsck, "Update an On-Disk Inode");
2742 
2743 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2744 	sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2745 
2746 #define DEBUG 1
2747 #ifdef DEBUG
2748 static int fsckcmds = 0;
2749 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2750 #endif /* DEBUG */
2751 
2752 static int buffered_write(struct file *, struct uio *, struct ucred *,
2753 	int, struct thread *);
2754 
2755 static int
2756 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2757 {
2758 	struct thread *td = curthread;
2759 	struct fsck_cmd cmd;
2760 	struct ufsmount *ump;
2761 	struct vnode *vp, *dvp, *fdvp;
2762 	struct inode *ip, *dp;
2763 	struct mount *mp;
2764 	struct fs *fs;
2765 	ufs2_daddr_t blkno;
2766 	long blkcnt, blksize;
2767 	struct file *fp, *vfp;
2768 	cap_rights_t rights;
2769 	int filetype, error;
2770 	static struct fileops *origops, bufferedops;
2771 
2772 	if (req->newlen > sizeof cmd)
2773 		return (EBADRPC);
2774 	if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2775 		return (error);
2776 	if (cmd.version != FFS_CMD_VERSION)
2777 		return (ERPCMISMATCH);
2778 	if ((error = getvnode(td, cmd.handle,
2779 	    cap_rights_init(&rights, CAP_FSCK), &fp)) != 0)
2780 		return (error);
2781 	vp = fp->f_data;
2782 	if (vp->v_type != VREG && vp->v_type != VDIR) {
2783 		fdrop(fp, td);
2784 		return (EINVAL);
2785 	}
2786 	vn_start_write(vp, &mp, V_WAIT);
2787 	if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2788 		vn_finished_write(mp);
2789 		fdrop(fp, td);
2790 		return (EINVAL);
2791 	}
2792 	ump = VFSTOUFS(mp);
2793 	if ((mp->mnt_flag & MNT_RDONLY) &&
2794 	    ump->um_fsckpid != td->td_proc->p_pid) {
2795 		vn_finished_write(mp);
2796 		fdrop(fp, td);
2797 		return (EROFS);
2798 	}
2799 	fs = ump->um_fs;
2800 	filetype = IFREG;
2801 
2802 	switch (oidp->oid_number) {
2803 
2804 	case FFS_SET_FLAGS:
2805 #ifdef DEBUG
2806 		if (fsckcmds)
2807 			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2808 			    cmd.size > 0 ? "set" : "clear");
2809 #endif /* DEBUG */
2810 		if (cmd.size > 0)
2811 			fs->fs_flags |= (long)cmd.value;
2812 		else
2813 			fs->fs_flags &= ~(long)cmd.value;
2814 		break;
2815 
2816 	case FFS_ADJ_REFCNT:
2817 #ifdef DEBUG
2818 		if (fsckcmds) {
2819 			printf("%s: adjust inode %jd link count by %jd\n",
2820 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2821 			    (intmax_t)cmd.size);
2822 		}
2823 #endif /* DEBUG */
2824 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2825 			break;
2826 		ip = VTOI(vp);
2827 		ip->i_nlink += cmd.size;
2828 		DIP_SET(ip, i_nlink, ip->i_nlink);
2829 		ip->i_effnlink += cmd.size;
2830 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2831 		error = ffs_update(vp, 1);
2832 		if (DOINGSOFTDEP(vp))
2833 			softdep_change_linkcnt(ip);
2834 		vput(vp);
2835 		break;
2836 
2837 	case FFS_ADJ_BLKCNT:
2838 #ifdef DEBUG
2839 		if (fsckcmds) {
2840 			printf("%s: adjust inode %jd block count by %jd\n",
2841 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2842 			    (intmax_t)cmd.size);
2843 		}
2844 #endif /* DEBUG */
2845 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2846 			break;
2847 		ip = VTOI(vp);
2848 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2849 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2850 		error = ffs_update(vp, 1);
2851 		vput(vp);
2852 		break;
2853 
2854 	case FFS_DIR_FREE:
2855 		filetype = IFDIR;
2856 		/* fall through */
2857 
2858 	case FFS_FILE_FREE:
2859 #ifdef DEBUG
2860 		if (fsckcmds) {
2861 			if (cmd.size == 1)
2862 				printf("%s: free %s inode %ju\n",
2863 				    mp->mnt_stat.f_mntonname,
2864 				    filetype == IFDIR ? "directory" : "file",
2865 				    (uintmax_t)cmd.value);
2866 			else
2867 				printf("%s: free %s inodes %ju-%ju\n",
2868 				    mp->mnt_stat.f_mntonname,
2869 				    filetype == IFDIR ? "directory" : "file",
2870 				    (uintmax_t)cmd.value,
2871 				    (uintmax_t)(cmd.value + cmd.size - 1));
2872 		}
2873 #endif /* DEBUG */
2874 		while (cmd.size > 0) {
2875 			if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2876 			    cmd.value, filetype, NULL)))
2877 				break;
2878 			cmd.size -= 1;
2879 			cmd.value += 1;
2880 		}
2881 		break;
2882 
2883 	case FFS_BLK_FREE:
2884 #ifdef DEBUG
2885 		if (fsckcmds) {
2886 			if (cmd.size == 1)
2887 				printf("%s: free block %jd\n",
2888 				    mp->mnt_stat.f_mntonname,
2889 				    (intmax_t)cmd.value);
2890 			else
2891 				printf("%s: free blocks %jd-%jd\n",
2892 				    mp->mnt_stat.f_mntonname,
2893 				    (intmax_t)cmd.value,
2894 				    (intmax_t)cmd.value + cmd.size - 1);
2895 		}
2896 #endif /* DEBUG */
2897 		blkno = cmd.value;
2898 		blkcnt = cmd.size;
2899 		blksize = fs->fs_frag - (blkno % fs->fs_frag);
2900 		while (blkcnt > 0) {
2901 			if (blksize > blkcnt)
2902 				blksize = blkcnt;
2903 			ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2904 			    blksize * fs->fs_fsize, ROOTINO, VDIR, NULL);
2905 			blkno += blksize;
2906 			blkcnt -= blksize;
2907 			blksize = fs->fs_frag;
2908 		}
2909 		break;
2910 
2911 	/*
2912 	 * Adjust superblock summaries.  fsck(8) is expected to
2913 	 * submit deltas when necessary.
2914 	 */
2915 	case FFS_ADJ_NDIR:
2916 #ifdef DEBUG
2917 		if (fsckcmds) {
2918 			printf("%s: adjust number of directories by %jd\n",
2919 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2920 		}
2921 #endif /* DEBUG */
2922 		fs->fs_cstotal.cs_ndir += cmd.value;
2923 		break;
2924 
2925 	case FFS_ADJ_NBFREE:
2926 #ifdef DEBUG
2927 		if (fsckcmds) {
2928 			printf("%s: adjust number of free blocks by %+jd\n",
2929 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2930 		}
2931 #endif /* DEBUG */
2932 		fs->fs_cstotal.cs_nbfree += cmd.value;
2933 		break;
2934 
2935 	case FFS_ADJ_NIFREE:
2936 #ifdef DEBUG
2937 		if (fsckcmds) {
2938 			printf("%s: adjust number of free inodes by %+jd\n",
2939 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2940 		}
2941 #endif /* DEBUG */
2942 		fs->fs_cstotal.cs_nifree += cmd.value;
2943 		break;
2944 
2945 	case FFS_ADJ_NFFREE:
2946 #ifdef DEBUG
2947 		if (fsckcmds) {
2948 			printf("%s: adjust number of free frags by %+jd\n",
2949 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2950 		}
2951 #endif /* DEBUG */
2952 		fs->fs_cstotal.cs_nffree += cmd.value;
2953 		break;
2954 
2955 	case FFS_ADJ_NUMCLUSTERS:
2956 #ifdef DEBUG
2957 		if (fsckcmds) {
2958 			printf("%s: adjust number of free clusters by %+jd\n",
2959 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2960 		}
2961 #endif /* DEBUG */
2962 		fs->fs_cstotal.cs_numclusters += cmd.value;
2963 		break;
2964 
2965 	case FFS_SET_CWD:
2966 #ifdef DEBUG
2967 		if (fsckcmds) {
2968 			printf("%s: set current directory to inode %jd\n",
2969 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2970 		}
2971 #endif /* DEBUG */
2972 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
2973 			break;
2974 		AUDIT_ARG_VNODE1(vp);
2975 		if ((error = change_dir(vp, td)) != 0) {
2976 			vput(vp);
2977 			break;
2978 		}
2979 		VOP_UNLOCK(vp, 0);
2980 		pwd_chdir(td, vp);
2981 		break;
2982 
2983 	case FFS_SET_DOTDOT:
2984 #ifdef DEBUG
2985 		if (fsckcmds) {
2986 			printf("%s: change .. in cwd from %jd to %jd\n",
2987 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2988 			    (intmax_t)cmd.size);
2989 		}
2990 #endif /* DEBUG */
2991 		/*
2992 		 * First we have to get and lock the parent directory
2993 		 * to which ".." points.
2994 		 */
2995 		error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
2996 		if (error)
2997 			break;
2998 		/*
2999 		 * Now we get and lock the child directory containing "..".
3000 		 */
3001 		FILEDESC_SLOCK(td->td_proc->p_fd);
3002 		dvp = td->td_proc->p_fd->fd_cdir;
3003 		FILEDESC_SUNLOCK(td->td_proc->p_fd);
3004 		if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
3005 			vput(fdvp);
3006 			break;
3007 		}
3008 		dp = VTOI(dvp);
3009 		dp->i_offset = 12;	/* XXX mastertemplate.dot_reclen */
3010 		error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3011 		    DT_DIR, 0);
3012 		cache_purge(fdvp);
3013 		cache_purge(dvp);
3014 		vput(dvp);
3015 		vput(fdvp);
3016 		break;
3017 
3018 	case FFS_UNLINK:
3019 #ifdef DEBUG
3020 		if (fsckcmds) {
3021 			char buf[32];
3022 
3023 			if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL))
3024 				strncpy(buf, "Name_too_long", 32);
3025 			printf("%s: unlink %s (inode %jd)\n",
3026 			    mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3027 		}
3028 #endif /* DEBUG */
3029 		/*
3030 		 * kern_unlinkat will do its own start/finish writes and
3031 		 * they do not nest, so drop ours here. Setting mp == NULL
3032 		 * indicates that vn_finished_write is not needed down below.
3033 		 */
3034 		vn_finished_write(mp);
3035 		mp = NULL;
3036 		error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
3037 		    UIO_USERSPACE, (ino_t)cmd.size);
3038 		break;
3039 
3040 	case FFS_SET_INODE:
3041 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3042 			error = EPERM;
3043 			break;
3044 		}
3045 #ifdef DEBUG
3046 		if (fsckcmds) {
3047 			printf("%s: update inode %jd\n",
3048 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3049 		}
3050 #endif /* DEBUG */
3051 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3052 			break;
3053 		AUDIT_ARG_VNODE1(vp);
3054 		ip = VTOI(vp);
3055 		if (ip->i_ump->um_fstype == UFS1)
3056 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3057 			    sizeof(struct ufs1_dinode));
3058 		else
3059 			error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3060 			    sizeof(struct ufs2_dinode));
3061 		if (error) {
3062 			vput(vp);
3063 			break;
3064 		}
3065 		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3066 		error = ffs_update(vp, 1);
3067 		vput(vp);
3068 		break;
3069 
3070 	case FFS_SET_BUFOUTPUT:
3071 		if (ump->um_fsckpid != td->td_proc->p_pid) {
3072 			error = EPERM;
3073 			break;
3074 		}
3075 		if (VTOI(vp)->i_ump != ump) {
3076 			error = EINVAL;
3077 			break;
3078 		}
3079 #ifdef DEBUG
3080 		if (fsckcmds) {
3081 			printf("%s: %s buffered output for descriptor %jd\n",
3082 			    mp->mnt_stat.f_mntonname,
3083 			    cmd.size == 1 ? "enable" : "disable",
3084 			    (intmax_t)cmd.value);
3085 		}
3086 #endif /* DEBUG */
3087 		if ((error = getvnode(td, cmd.value,
3088 		    cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0)
3089 			break;
3090 		if (vfp->f_vnode->v_type != VCHR) {
3091 			fdrop(vfp, td);
3092 			error = EINVAL;
3093 			break;
3094 		}
3095 		if (origops == NULL) {
3096 			origops = vfp->f_ops;
3097 			bcopy((void *)origops, (void *)&bufferedops,
3098 			    sizeof(bufferedops));
3099 			bufferedops.fo_write = buffered_write;
3100 		}
3101 		if (cmd.size == 1)
3102 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3103 			    (uintptr_t)&bufferedops);
3104 		else
3105 			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3106 			    (uintptr_t)origops);
3107 		fdrop(vfp, td);
3108 		break;
3109 
3110 	default:
3111 #ifdef DEBUG
3112 		if (fsckcmds) {
3113 			printf("Invalid request %d from fsck\n",
3114 			    oidp->oid_number);
3115 		}
3116 #endif /* DEBUG */
3117 		error = EINVAL;
3118 		break;
3119 
3120 	}
3121 	fdrop(fp, td);
3122 	vn_finished_write(mp);
3123 	return (error);
3124 }
3125 
3126 /*
3127  * Function to switch a descriptor to use the buffer cache to stage
3128  * its I/O. This is needed so that writes to the filesystem device
3129  * will give snapshots a chance to copy modified blocks for which it
3130  * needs to retain copies.
3131  */
3132 static int
3133 buffered_write(fp, uio, active_cred, flags, td)
3134 	struct file *fp;
3135 	struct uio *uio;
3136 	struct ucred *active_cred;
3137 	int flags;
3138 	struct thread *td;
3139 {
3140 	struct vnode *devvp, *vp;
3141 	struct inode *ip;
3142 	struct buf *bp;
3143 	struct fs *fs;
3144 	struct filedesc *fdp;
3145 	int error;
3146 	daddr_t lbn;
3147 
3148 	/*
3149 	 * The devvp is associated with the /dev filesystem. To discover
3150 	 * the filesystem with which the device is associated, we depend
3151 	 * on the application setting the current directory to a location
3152 	 * within the filesystem being written. Yes, this is an ugly hack.
3153 	 */
3154 	devvp = fp->f_vnode;
3155 	if (!vn_isdisk(devvp, NULL))
3156 		return (EINVAL);
3157 	fdp = td->td_proc->p_fd;
3158 	FILEDESC_SLOCK(fdp);
3159 	vp = fdp->fd_cdir;
3160 	vref(vp);
3161 	FILEDESC_SUNLOCK(fdp);
3162 	vn_lock(vp, LK_SHARED | LK_RETRY);
3163 	/*
3164 	 * Check that the current directory vnode indeed belongs to
3165 	 * UFS before trying to dereference UFS-specific v_data fields.
3166 	 */
3167 	if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3168 		vput(vp);
3169 		return (EINVAL);
3170 	}
3171 	ip = VTOI(vp);
3172 	if (ip->i_devvp != devvp) {
3173 		vput(vp);
3174 		return (EINVAL);
3175 	}
3176 	fs = ip->i_fs;
3177 	vput(vp);
3178 	foffset_lock_uio(fp, uio, flags);
3179 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3180 #ifdef DEBUG
3181 	if (fsckcmds) {
3182 		printf("%s: buffered write for block %jd\n",
3183 		    fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3184 	}
3185 #endif /* DEBUG */
3186 	/*
3187 	 * All I/O must be contained within a filesystem block, start on
3188 	 * a fragment boundary, and be a multiple of fragments in length.
3189 	 */
3190 	if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3191 	    fragoff(fs, uio->uio_offset) != 0 ||
3192 	    fragoff(fs, uio->uio_resid) != 0) {
3193 		error = EINVAL;
3194 		goto out;
3195 	}
3196 	lbn = numfrags(fs, uio->uio_offset);
3197 	bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3198 	bp->b_flags |= B_RELBUF;
3199 	if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3200 		brelse(bp);
3201 		goto out;
3202 	}
3203 	error = bwrite(bp);
3204 out:
3205 	VOP_UNLOCK(devvp, 0);
3206 	foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
3207 	return (error);
3208 }
3209