xref: /freebsd/sys/ufs/ffs/ffs_alloc.c (revision dc37121d3210d08c96a883ebfed780660e7e2b39)
1 /*-
2  * SPDX-License-Identifier: (BSD-2-Clause AND BSD-3-Clause)
3  *
4  * Copyright (c) 2002 Networks Associates Technology, Inc.
5  * All rights reserved.
6  *
7  * This software was developed for the FreeBSD Project by Marshall
8  * Kirk McKusick and Network Associates Laboratories, the Security
9  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
10  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
11  * research program
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1982, 1986, 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  */
61 
62 #include <sys/cdefs.h>
63 #include "opt_quota.h"
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/bio.h>
68 #include <sys/buf.h>
69 #include <sys/capsicum.h>
70 #include <sys/conf.h>
71 #include <sys/fcntl.h>
72 #include <sys/file.h>
73 #include <sys/filedesc.h>
74 #include <sys/gsb_crc32.h>
75 #include <sys/kernel.h>
76 #include <sys/mount.h>
77 #include <sys/priv.h>
78 #include <sys/proc.h>
79 #include <sys/stat.h>
80 #include <sys/syscallsubr.h>
81 #include <sys/sysctl.h>
82 #include <sys/syslog.h>
83 #include <sys/taskqueue.h>
84 #include <sys/vnode.h>
85 
86 #include <security/audit/audit.h>
87 
88 #include <geom/geom.h>
89 #include <geom/geom_vfs.h>
90 
91 #include <ufs/ufs/dir.h>
92 #include <ufs/ufs/extattr.h>
93 #include <ufs/ufs/quota.h>
94 #include <ufs/ufs/inode.h>
95 #include <ufs/ufs/ufs_extern.h>
96 #include <ufs/ufs/ufsmount.h>
97 
98 #include <ufs/ffs/fs.h>
99 #include <ufs/ffs/ffs_extern.h>
100 #include <ufs/ffs/softdep.h>
101 
102 typedef ufs2_daddr_t allocfcn_t(struct inode *ip, uint64_t cg,
103 				  ufs2_daddr_t bpref, int size, int rsize);
104 
105 static ufs2_daddr_t ffs_alloccg(struct inode *, uint64_t, ufs2_daddr_t, int,
106 				  int);
107 static ufs2_daddr_t
108 	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
109 static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
110 		    struct vnode *, ufs2_daddr_t, long, ino_t,
111 		    struct workhead *);
112 #ifdef INVARIANTS
113 static int	ffs_checkfreeblk(struct inode *, ufs2_daddr_t, long);
114 #endif
115 static void	ffs_checkcgintegrity(struct fs *, uint64_t, int);
116 static ufs2_daddr_t ffs_clusteralloc(struct inode *, uint64_t, ufs2_daddr_t,
117 				  int);
118 static ino_t	ffs_dirpref(struct inode *);
119 static ufs2_daddr_t ffs_fragextend(struct inode *, uint64_t, ufs2_daddr_t,
120 		    int, int);
121 static ufs2_daddr_t	ffs_hashalloc(struct inode *, uint64_t, ufs2_daddr_t,
122 		    int, int, allocfcn_t *);
123 static ufs2_daddr_t ffs_nodealloccg(struct inode *, uint64_t, ufs2_daddr_t, int,
124 		    int);
125 static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
126 static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
127 static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
128 static void	ffs_ckhash_cg(struct buf *);
129 
130 /*
131  * Allocate a block in the filesystem.
132  *
133  * The size of the requested block is given, which must be some
134  * multiple of fs_fsize and <= fs_bsize.
135  * A preference may be optionally specified. If a preference is given
136  * the following hierarchy is used to allocate a block:
137  *   1) allocate the requested block.
138  *   2) allocate a rotationally optimal block in the same cylinder.
139  *   3) allocate a block in the same cylinder group.
140  *   4) quadratically rehash into other cylinder groups, until an
141  *      available block is located.
142  * If no block preference is given the following hierarchy is used
143  * to allocate a block:
144  *   1) allocate a block in the cylinder group that contains the
145  *      inode for the file.
146  *   2) quadratically rehash into other cylinder groups, until an
147  *      available block is located.
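 * The cylinder group search itself is done by ffs_hashalloc() (declared
 * above and defined later in this file): it tries the preferred cylinder
 * group first, then probes cylinder groups at quadratically increasing
 * offsets from it, and finally falls back to a linear scan of the
 * remaining groups.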
148  */
149 int
150 ffs_alloc(struct inode *ip,
151 	ufs2_daddr_t lbn,
152 	ufs2_daddr_t bpref,
153 	int size,
154 	int flags,
155 	struct ucred *cred,
156 	ufs2_daddr_t *bnp)
157 {
158 	struct fs *fs;
159 	struct ufsmount *ump;
160 	ufs2_daddr_t bno;
161 	uint64_t cg, reclaimed;
162 	int64_t delta;
163 #ifdef QUOTA
164 	int error;
165 #endif
166 
167 	*bnp = 0;
168 	ump = ITOUMP(ip);
169 	fs = ump->um_fs;
170 	mtx_assert(UFS_MTX(ump), MA_OWNED);
171 #ifdef INVARIANTS
172 	if ((uint64_t)size > fs->fs_bsize || fragoff(fs, size) != 0) {
173 		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
174 		    devtoname(ump->um_dev), (long)fs->fs_bsize, size,
175 		    fs->fs_fsmnt);
176 		panic("ffs_alloc: bad size");
177 	}
178 	if (cred == NOCRED)
179 		panic("ffs_alloc: missing credential");
180 #endif /* INVARIANTS */
181 	reclaimed = 0;
182 retry:
183 #ifdef QUOTA
184 	UFS_UNLOCK(ump);
185 	error = chkdq(ip, btodb(size), cred, 0);
186 	if (error)
187 		return (error);
188 	UFS_LOCK(ump);
189 #endif
190 	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
191 		goto nospace;
192 	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE) &&
193 	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
194 		goto nospace;
195 	if (bpref >= fs->fs_size)
196 		bpref = 0;
197 	if (bpref == 0)
198 		cg = ino_to_cg(fs, ip->i_number);
199 	else
200 		cg = dtog(fs, bpref);
201 	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
202 	if (bno > 0) {
203 		delta = btodb(size);
204 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
205 		if (flags & IO_EXT)
206 			UFS_INODE_SET_FLAG(ip, IN_CHANGE);
207 		else
208 			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
209 		*bnp = bno;
210 		return (0);
211 	}
212 nospace:
213 #ifdef QUOTA
214 	UFS_UNLOCK(ump);
215 	/*
216 	 * Restore user's disk quota because allocation failed.
217 	 */
218 	(void) chkdq(ip, -btodb(size), cred, FORCE);
219 	UFS_LOCK(ump);
220 #endif
221 	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
222 		reclaimed = 1;
223 		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
224 		goto retry;
225 	}
226 	if (ffs_fsfail_cleanup_locked(ump, 0)) {
227 		UFS_UNLOCK(ump);
228 		return (ENXIO);
229 	}
230 	if (reclaimed > 0 &&
231 	    ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
232 		UFS_UNLOCK(ump);
233 		ffs_fserr(fs, ip->i_number, "filesystem full");
234 		uprintf("\n%s: write failed, filesystem is full\n",
235 		    fs->fs_fsmnt);
236 	} else {
237 		UFS_UNLOCK(ump);
238 	}
239 	return (ENOSPC);
240 }
241 
242 /*
243  * Reallocate a fragment to a bigger size
244  *
245  * The number and size of the old block is given, and a preference
246  * and new size is also specified. The allocator attempts to extend
247  * the original block. Failing that, the regular block allocator is
248  * invoked to get an appropriate block.
249  */
250 int
251 ffs_realloccg(struct inode *ip,
252 	ufs2_daddr_t lbprev,
253 	ufs2_daddr_t bprev,
254 	ufs2_daddr_t bpref,
255 	int osize,
256 	int nsize,
257 	int flags,
258 	struct ucred *cred,
259 	struct buf **bpp)
260 {
261 	struct vnode *vp;
262 	struct fs *fs;
263 	struct buf *bp;
264 	struct ufsmount *ump;
265 	uint64_t cg, request, reclaimed;
266 	int error, gbflags;
267 	ufs2_daddr_t bno;
268 	int64_t delta;
269 
270 	vp = ITOV(ip);
271 	ump = ITOUMP(ip);
272 	fs = ump->um_fs;
273 	bp = NULL;
274 	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
275 #ifdef WITNESS
276 	gbflags |= IS_SNAPSHOT(ip) ? GB_NOWITNESS : 0;
277 #endif
278 
279 	mtx_assert(UFS_MTX(ump), MA_OWNED);
280 #ifdef INVARIANTS
281 	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
282 		panic("ffs_realloccg: allocation on suspended filesystem");
283 	if ((uint64_t)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
284 	    (uint64_t)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
285 		printf(
286 		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
287 		    devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
288 		    nsize, fs->fs_fsmnt);
289 		panic("ffs_realloccg: bad size");
290 	}
291 	if (cred == NOCRED)
292 		panic("ffs_realloccg: missing credential");
293 #endif /* INVARIANTS */
294 	reclaimed = 0;
295 retry:
296 	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE) &&
297 	    freespace(fs, fs->fs_minfree) -  numfrags(fs, nsize - osize) < 0) {
298 		goto nospace;
299 	}
300 	if (bprev == 0) {
301 		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
302 		    devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
303 		    fs->fs_fsmnt);
304 		panic("ffs_realloccg: bad bprev");
305 	}
306 	UFS_UNLOCK(ump);
307 	/*
308 	 * Allocate the extra space in the buffer.
309 	 */
310 	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
311 	if (error) {
312 		return (error);
313 	}
314 
315 	if (bp->b_blkno == bp->b_lblkno) {
316 		if (lbprev >= UFS_NDADDR)
317 			panic("ffs_realloccg: lbprev out of range");
318 		bp->b_blkno = fsbtodb(fs, bprev);
319 	}
320 
321 #ifdef QUOTA
322 	error = chkdq(ip, btodb(nsize - osize), cred, 0);
323 	if (error) {
324 		brelse(bp);
325 		return (error);
326 	}
327 #endif
328 	/*
329 	 * Check for extension in the existing location.
330 	 */
331 	*bpp = NULL;
332 	cg = dtog(fs, bprev);
333 	UFS_LOCK(ump);
334 	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
335 	if (bno) {
336 		if (bp->b_blkno != fsbtodb(fs, bno))
337 			panic("ffs_realloccg: bad blockno");
338 		delta = btodb(nsize - osize);
339 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
340 		if (flags & IO_EXT)
341 			UFS_INODE_SET_FLAG(ip, IN_CHANGE);
342 		else
343 			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
344 		allocbuf(bp, nsize);
345 		bp->b_flags |= B_DONE;
346 		vfs_bio_bzero_buf(bp, osize, nsize - osize);
347 		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
348 			vfs_bio_set_valid(bp, osize, nsize - osize);
349 		*bpp = bp;
350 		return (0);
351 	}
352 	/*
353 	 * Allocate a new disk location.
354 	 */
355 	if (bpref >= fs->fs_size)
356 		bpref = 0;
357 	switch ((int)fs->fs_optim) {
358 	case FS_OPTSPACE:
359 		/*
360 		 * Allocate an exact sized fragment. Although this makes
361 		 * best use of space, we will waste time relocating it if
362 		 * the file continues to grow. If the fragmentation is
363 		 * less than half of the minimum free reserve, we choose
364 		 * to begin optimizing for time.
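		 * For example, with the newfs default fs_minfree of 8%, the
		 * switch to FS_OPTTIME below is made once the free fragments
		 * fall to 4% of fs_dsize or less; filesystems created with
		 * fs_minfree of 5% or less never make the switch.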
365 		 */
366 		request = nsize;
367 		if (fs->fs_minfree <= 5 ||
368 		    fs->fs_cstotal.cs_nffree >
369 		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
370 			break;
371 		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
372 			fs->fs_fsmnt);
373 		fs->fs_optim = FS_OPTTIME;
374 		break;
375 	case FS_OPTTIME:
376 		/*
377 		 * At this point we have discovered a file that is trying to
378 		 * grow a small fragment to a larger fragment. To save time,
379 		 * we allocate a full sized block, then free the unused portion.
380 		 * If the file continues to grow, the `ffs_fragextend' call
381 		 * above will be able to grow it in place without further
382 		 * copying. If aberrant programs cause disk fragmentation to
383 		 * grow within 2% of the free reserve, we choose to begin
384 		 * optimizing for space.
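		 * For example, with the newfs default fs_minfree of 8%, the
		 * switch back to FS_OPTSPACE below is made once the free
		 * fragments grow to 6% ((fs_minfree - 2)%) of fs_dsize or more.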
385 		 */
386 		request = fs->fs_bsize;
387 		if (fs->fs_cstotal.cs_nffree <
388 		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
389 			break;
390 		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
391 			fs->fs_fsmnt);
392 		fs->fs_optim = FS_OPTSPACE;
393 		break;
394 	default:
395 		printf("dev = %s, optim = %ld, fs = %s\n",
396 		    devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
397 		panic("ffs_realloccg: bad optim");
398 		/* NOTREACHED */
399 	}
400 	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
401 	if (bno > 0) {
402 		bp->b_blkno = fsbtodb(fs, bno);
403 		if (!DOINGSOFTDEP(vp))
404 			/*
405 			 * The usual case is that a smaller fragment that
406 			 * was just allocated has been replaced with a bigger
407 			 * fragment or a full-size block. If it is marked as
408 			 * B_DELWRI, the current contents have not been written
409 			 * to disk. It is possible that the block was written
410 			 * earlier, but very uncommon. If the block has never
411 			 * been written, there is no need to send a BIO_DELETE
412 			 * for it when it is freed. The gain from avoiding the
413 			 * TRIMs for the common case of unwritten blocks far
414 			 * exceeds the cost of the write amplification for the
415 			 * uncommon case of failing to send a TRIM for a block
416 			 * that had been written.
417 			 */
418 			ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
419 			    ip->i_number, vp->v_type, NULL,
420 			    (bp->b_flags & B_DELWRI) != 0 ?
421 			    NOTRIM_KEY : SINGLETON_KEY);
422 		delta = btodb(nsize - osize);
423 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
424 		if (flags & IO_EXT)
425 			UFS_INODE_SET_FLAG(ip, IN_CHANGE);
426 		else
427 			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
428 		allocbuf(bp, nsize);
429 		bp->b_flags |= B_DONE;
430 		vfs_bio_bzero_buf(bp, osize, nsize - osize);
431 		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
432 			vfs_bio_set_valid(bp, osize, nsize - osize);
433 		*bpp = bp;
434 		return (0);
435 	}
436 #ifdef QUOTA
437 	UFS_UNLOCK(ump);
438 	/*
439 	 * Restore user's disk quota because allocation failed.
440 	 */
441 	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
442 	UFS_LOCK(ump);
443 #endif
444 nospace:
445 	/*
446 	 * no space available
447 	 */
448 	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
449 		reclaimed = 1;
450 		UFS_UNLOCK(ump);
451 		if (bp) {
452 			brelse(bp);
453 			bp = NULL;
454 		}
455 		UFS_LOCK(ump);
456 		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
457 		goto retry;
458 	}
459 	if (bp)
460 		brelse(bp);
461 	if (ffs_fsfail_cleanup_locked(ump, 0)) {
462 		UFS_UNLOCK(ump);
463 		return (ENXIO);
464 	}
465 	if (reclaimed > 0 &&
466 	    ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
467 		UFS_UNLOCK(ump);
468 		ffs_fserr(fs, ip->i_number, "filesystem full");
469 		uprintf("\n%s: write failed, filesystem is full\n",
470 		    fs->fs_fsmnt);
471 	} else {
472 		UFS_UNLOCK(ump);
473 	}
474 	return (ENOSPC);
475 }
476 
477 /*
478  * Reallocate a sequence of blocks into a contiguous sequence of blocks.
479  *
480  * The vnode and an array of buffer pointers for a range of sequential
481  * logical blocks to be made contiguous is given. The allocator attempts
482  * to find a range of sequential blocks starting as close as possible
483  * from the end of the allocation for the logical block immediately
484  * preceding the current range. If successful, the physical block numbers
485  * in the buffer pointers and in the inode are changed to reflect the new
486  * allocation. If unsuccessful, the allocation is left unchanged. The
487  * success in doing the reallocation is returned. Note that the error
488  * return is not reflected back to the user. Rather the previous block
489  * allocation will be used.
490  */
491 
492 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
493     "FFS filesystem");
494 
495 static int doasyncfree = 1;
496 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
497 "do not force synchronous writes when blocks are reallocated");
498 
499 static int doreallocblks = 1;
500 SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
501 "enable block reallocation");
502 
503 static int dotrimcons = 1;
504 SYSCTL_INT(_vfs_ffs, OID_AUTO, dotrimcons, CTLFLAG_RWTUN, &dotrimcons, 0,
505 "enable BIO_DELETE / TRIM consolidation");
506 
507 static int maxclustersearch = 10;
508 SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
509 0, "max number of cylinder groups to search for contiguous blocks");
510 
511 #ifdef DIAGNOSTIC
512 static int prtrealloc = 0;
513 SYSCTL_INT(_debug, OID_AUTO, ffs_prtrealloc, CTLFLAG_RW, &prtrealloc, 0,
514 	"print out FFS filesystem block reallocation operations");
515 #endif
516 
517 int
518 ffs_reallocblks(
519 	struct vop_reallocblks_args /* {
520 		struct vnode *a_vp;
521 		struct cluster_save *a_buflist;
522 	} */ *ap)
523 {
524 	struct ufsmount *ump;
525 	int error;
526 
527 	/*
528 	 * We used to skip reallocating the blocks of a file into a
529 	 * contiguous sequence if the underlying flash device requested
530 	 * BIO_DELETE notifications, because devices that benefit from
531 	 * BIO_DELETE also benefit from not moving the data. However,
532 	 * the destination for the data is usually moved before the data
533 	 * is written to the initially allocated location, so we rarely
534 	 * suffer the penalty of extra writes. With the addition of the
535 	 * consolidation of contiguous blocks into single BIO_DELETE
536 	 * operations, having fewer but larger contiguous blocks reduces
537 	 * the number of (slow and expensive) BIO_DELETE operations. So
538 	 * when doing BIO_DELETE consolidation, we do block reallocation.
539 	 *
540 	 * Skip if reallocblks has been disabled globally.
541 	 */
542 	ump = ap->a_vp->v_mount->mnt_data;
543 	if ((((ump->um_flags) & UM_CANDELETE) != 0 && dotrimcons == 0) ||
544 	    doreallocblks == 0)
545 		return (ENOSPC);
546 
547 	/*
548 	 * We can't wait in softdep prealloc as it may fsync and recurse
549 	 * here.  Instead we simply fail to reallocate blocks if this
550 	 * rare condition arises.
551 	 */
552 	if (DOINGSUJ(ap->a_vp))
553 		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
554 			return (ENOSPC);
555 	vn_seqc_write_begin(ap->a_vp);
556 	error = ump->um_fstype == UFS1 ? ffs_reallocblks_ufs1(ap) :
557 	    ffs_reallocblks_ufs2(ap);
558 	vn_seqc_write_end(ap->a_vp);
559 	return (error);
560 }
561 
562 static int
563 ffs_reallocblks_ufs1(
564 	struct vop_reallocblks_args /* {
565 		struct vnode *a_vp;
566 		struct cluster_save *a_buflist;
567 	} */ *ap)
568 {
569 	struct fs *fs;
570 	struct inode *ip;
571 	struct vnode *vp;
572 	struct buf *sbp, *ebp, *bp;
573 	ufs1_daddr_t *bap, *sbap, *ebap;
574 	struct cluster_save *buflist;
575 	struct ufsmount *ump;
576 	ufs_lbn_t start_lbn, end_lbn;
577 	ufs1_daddr_t soff, newblk, blkno;
578 	ufs2_daddr_t pref;
579 	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
580 	int i, cg, len, start_lvl, end_lvl, ssize;
581 
582 	vp = ap->a_vp;
583 	ip = VTOI(vp);
584 	ump = ITOUMP(ip);
585 	fs = ump->um_fs;
586 	/*
587 	 * If we are not tracking block clusters or if we have less than 4%
588 	 * free blocks left, then do not attempt to cluster. Running with
589 	 * less than 5% free block reserve is not recommended, and those who
590 	 * choose to do so should not expect good file layout.
591 	 */
592 	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
593 		return (ENOSPC);
594 	buflist = ap->a_buflist;
595 	len = buflist->bs_nchildren;
596 	start_lbn = buflist->bs_children[0]->b_lblkno;
597 	end_lbn = start_lbn + len - 1;
598 #ifdef INVARIANTS
599 	for (i = 0; i < len; i++)
600 		if (!ffs_checkfreeblk(ip,
601 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
602 			panic("ffs_reallocblks: unallocated block 1");
603 	for (i = 1; i < len; i++)
604 		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
605 			panic("ffs_reallocblks: non-logical cluster");
606 	blkno = buflist->bs_children[0]->b_blkno;
607 	ssize = fsbtodb(fs, fs->fs_frag);
608 	for (i = 1; i < len - 1; i++)
609 		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
610 			panic("ffs_reallocblks: non-physical cluster %d", i);
611 #endif
612 	/*
613 	 * If the cluster crosses the boundary for the first indirect
614 	 * block, leave space for the indirect block. Indirect blocks
615 	 * are initially laid out in a position after the last direct
616 	 * block. Block reallocation would usually destroy locality by
617 	 * moving the indirect block out of the way to make room for
618 	 * data blocks if we didn't compensate here. We should also do
619 	 * this for other indirect block boundaries, but it is only
620 	 * important for the first one.
621 	 */
622 	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
623 		return (ENOSPC);
624 	/*
625 	 * If the latest allocation is in a new cylinder group, assume that
626 	 * the filesystem has decided to move and do not force it back to
627 	 * the previous cylinder group.
628 	 */
629 	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
630 	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
631 		return (ENOSPC);
632 	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
633 	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
634 		return (ENOSPC);
635 	/*
636 	 * Get the starting offset and block map for the first block.
637 	 */
638 	if (start_lvl == 0) {
639 		sbap = &ip->i_din1->di_db[0];
640 		soff = start_lbn;
641 	} else {
642 		idp = &start_ap[start_lvl - 1];
643 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
644 			brelse(sbp);
645 			return (ENOSPC);
646 		}
647 		sbap = (ufs1_daddr_t *)sbp->b_data;
648 		soff = idp->in_off;
649 	}
650 	/*
651 	 * If the block range spans two block maps, get the second map.
652 	 */
653 	ebap = NULL;
654 	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
655 		ssize = len;
656 	} else {
657 #ifdef INVARIANTS
658 		if (start_lvl > 0 &&
659 		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
660 			panic("ffs_reallocblk: start == end");
661 #endif
662 		ssize = len - (idp->in_off + 1);
663 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
664 			goto fail;
665 		ebap = (ufs1_daddr_t *)ebp->b_data;
666 	}
667 	/*
668 	 * Find the preferred location for the cluster. If we have not
669 	 * previously failed at this endeavor, then follow our standard
670 	 * preference calculation. If we have failed at it, then pick up
671 	 * where we last ended our search.
672 	 */
673 	UFS_LOCK(ump);
674 	if (ip->i_nextclustercg == -1)
675 		pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
676 	else
677 		pref = cgdata(fs, ip->i_nextclustercg);
678 	/*
679 	 * Search the block map looking for an allocation of the desired size.
680 	 * To avoid wasting too much time, we limit the number of cylinder
681 	 * groups that we will search.
682 	 */
683 	cg = dtog(fs, pref);
684 	MPASS(cg < fs->fs_ncg);
685 	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
686 		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
687 			break;
688 		cg += 1;
689 		if (cg >= fs->fs_ncg)
690 			cg = 0;
691 	}
692 	/*
693 	 * If we have failed in our search, record where we gave up for
694 	 * next time. Otherwise, fall back to our usual search criterion.
695 	 */
696 	if (newblk == 0) {
697 		ip->i_nextclustercg = cg;
698 		UFS_UNLOCK(ump);
699 		goto fail;
700 	}
701 	ip->i_nextclustercg = -1;
702 	/*
703 	 * We have found a new contiguous block.
704 	 *
705 	 * First we have to replace the old block pointers with the new
706 	 * block pointers in the inode and indirect blocks associated
707 	 * with the file.
708 	 */
709 #ifdef DIAGNOSTIC
710 	if (prtrealloc)
711 		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
712 		    (uintmax_t)ip->i_number,
713 		    (intmax_t)start_lbn, (intmax_t)end_lbn);
714 #endif
715 	blkno = newblk;
716 	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
717 		if (i == ssize) {
718 			bap = ebap;
719 			soff = -i;
720 		}
721 #ifdef INVARIANTS
722 		if (!ffs_checkfreeblk(ip,
723 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
724 			panic("ffs_reallocblks: unallocated block 2");
725 		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
726 			panic("ffs_reallocblks: alloc mismatch");
727 #endif
728 #ifdef DIAGNOSTIC
729 		if (prtrealloc)
730 			printf(" %d,", *bap);
731 #endif
732 		if (DOINGSOFTDEP(vp)) {
733 			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
734 				softdep_setup_allocdirect(ip, start_lbn + i,
735 				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
736 				    buflist->bs_children[i]);
737 			else
738 				softdep_setup_allocindir_page(ip, start_lbn + i,
739 				    i < ssize ? sbp : ebp, soff + i, blkno,
740 				    *bap, buflist->bs_children[i]);
741 		}
742 		*bap++ = blkno;
743 	}
744 	/*
745 	 * Next we must write out the modified inode and indirect blocks.
746 	 * For strict correctness, the writes should be synchronous since
747 	 * the old block values may have been written to disk. In practice
748 	 * they are almost never written, but if we are concerned about
749 	 * strict correctness, the `doasyncfree' flag should be set to zero.
750 	 *
751 	 * The test on `doasyncfree' should be changed to test a flag
752 	 * that shows whether the associated buffers and inodes have
753 	 * been written. The flag should be set when the cluster is
754 	 * started and cleared whenever the buffer or inode is flushed.
755 	 * We can then check below to see if it is set, and do the
756 	 * synchronous write only when it has been cleared.
757 	 */
758 	if (sbap != &ip->i_din1->di_db[0]) {
759 		if (doasyncfree)
760 			bdwrite(sbp);
761 		else
762 			bwrite(sbp);
763 	} else {
764 		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
765 		if (!doasyncfree)
766 			ffs_update(vp, 1);
767 	}
768 	if (ssize < len) {
769 		if (doasyncfree)
770 			bdwrite(ebp);
771 		else
772 			bwrite(ebp);
773 	}
774 	/*
775 	 * Last, free the old blocks and assign the new blocks to the buffers.
776 	 */
777 #ifdef DIAGNOSTIC
778 	if (prtrealloc)
779 		printf("\n\tnew:");
780 #endif
781 	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
782 		bp = buflist->bs_children[i];
783 		if (!DOINGSOFTDEP(vp))
784 			/*
785 			 * The usual case is that a set of N-contiguous blocks
786 			 * that was just allocated has been replaced with a
787 			 * set of N+1-contiguous blocks. If they are marked as
788 			 * B_DELWRI, the current contents have not been written
789 			 * to disk. It is possible that the blocks were written
790 			 * earlier, but very uncommon. If the blocks have never
791 			 * been written, there is no need to send a BIO_DELETE
792 			 * for them when they are freed. The gain from avoiding
793 			 * the TRIMs for the common case of unwritten blocks
794 			 * far exceeds the cost of the write amplification for
795 			 * the uncommon case of failing to send a TRIM for the
796 			 * blocks that had been written.
797 			 */
798 			ffs_blkfree(ump, fs, ump->um_devvp,
799 			    dbtofsb(fs, bp->b_blkno),
800 			    fs->fs_bsize, ip->i_number, vp->v_type, NULL,
801 			    (bp->b_flags & B_DELWRI) != 0 ?
802 			    NOTRIM_KEY : SINGLETON_KEY);
803 		bp->b_blkno = fsbtodb(fs, blkno);
804 #ifdef INVARIANTS
805 		if (!ffs_checkfreeblk(ip, dbtofsb(fs, bp->b_blkno),
806 		    fs->fs_bsize))
807 			panic("ffs_reallocblks: unallocated block 3");
808 #endif
809 #ifdef DIAGNOSTIC
810 		if (prtrealloc)
811 			printf(" %d,", blkno);
812 #endif
813 	}
814 #ifdef DIAGNOSTIC
815 	if (prtrealloc) {
816 		prtrealloc--;
817 		printf("\n");
818 	}
819 #endif
820 	return (0);
821 
822 fail:
823 	if (ssize < len)
824 		brelse(ebp);
825 	if (sbap != &ip->i_din1->di_db[0])
826 		brelse(sbp);
827 	return (ENOSPC);
828 }
829 
830 static int
831 ffs_reallocblks_ufs2(
832 	struct vop_reallocblks_args /* {
833 		struct vnode *a_vp;
834 		struct cluster_save *a_buflist;
835 	} */ *ap)
836 {
837 	struct fs *fs;
838 	struct inode *ip;
839 	struct vnode *vp;
840 	struct buf *sbp, *ebp, *bp;
841 	ufs2_daddr_t *bap, *sbap, *ebap;
842 	struct cluster_save *buflist;
843 	struct ufsmount *ump;
844 	ufs_lbn_t start_lbn, end_lbn;
845 	ufs2_daddr_t soff, newblk, blkno, pref;
846 	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
847 	int i, cg, len, start_lvl, end_lvl, ssize;
848 
849 	vp = ap->a_vp;
850 	ip = VTOI(vp);
851 	ump = ITOUMP(ip);
852 	fs = ump->um_fs;
853 	/*
854 	 * If we are not tracking block clusters or if we have less than 4%
855 	 * less than 5% free block reserve is not recommended, and those who
856 	 * choose to do so should not expect good file layout.
857 	 * choose to do so do not expect to have good file layout.
858 	 */
859 	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
860 		return (ENOSPC);
861 	buflist = ap->a_buflist;
862 	len = buflist->bs_nchildren;
863 	start_lbn = buflist->bs_children[0]->b_lblkno;
864 	end_lbn = start_lbn + len - 1;
865 #ifdef INVARIANTS
866 	for (i = 0; i < len; i++)
867 		if (!ffs_checkfreeblk(ip,
868 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
869 			panic("ffs_reallocblks: unallocated block 1");
870 	for (i = 1; i < len; i++)
871 		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
872 			panic("ffs_reallocblks: non-logical cluster");
873 	blkno = buflist->bs_children[0]->b_blkno;
874 	ssize = fsbtodb(fs, fs->fs_frag);
875 	for (i = 1; i < len - 1; i++)
876 		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
877 			panic("ffs_reallocblks: non-physical cluster %d", i);
878 #endif
879 	/*
880 	 * If the cluster crosses the boundary for the first indirect
881 	 * block, do not move anything in it. Indirect blocks are
882 	 * usually initially laid out in a position between the data
883 	 * blocks. Block reallocation would usually destroy locality by
884 	 * moving the indirect block out of the way to make room for
885 	 * data blocks if we didn't compensate here. We should also do
886 	 * this for other indirect block boundaries, but it is only
887 	 * important for the first one.
888 	 */
889 	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
890 		return (ENOSPC);
891 	/*
892 	 * If the latest allocation is in a new cylinder group, assume that
893 	 * the filesystem has decided to move and do not force it back to
894 	 * the previous cylinder group.
895 	 */
896 	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
897 	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
898 		return (ENOSPC);
899 	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
900 	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
901 		return (ENOSPC);
902 	/*
903 	 * Get the starting offset and block map for the first block.
904 	 */
905 	if (start_lvl == 0) {
906 		sbap = &ip->i_din2->di_db[0];
907 		soff = start_lbn;
908 	} else {
909 		idp = &start_ap[start_lvl - 1];
910 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
911 			brelse(sbp);
912 			return (ENOSPC);
913 		}
914 		sbap = (ufs2_daddr_t *)sbp->b_data;
915 		soff = idp->in_off;
916 	}
917 	/*
918 	 * If the block range spans two block maps, get the second map.
919 	 */
920 	ebap = NULL;
921 	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
922 		ssize = len;
923 	} else {
924 #ifdef INVARIANTS
925 		if (start_lvl > 0 &&
926 		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
927 			panic("ffs_reallocblk: start == end");
928 #endif
929 		ssize = len - (idp->in_off + 1);
930 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
931 			goto fail;
932 		ebap = (ufs2_daddr_t *)ebp->b_data;
933 	}
934 	/*
935 	 * Find the preferred location for the cluster. If we have not
936 	 * previously failed at this endeavor, then follow our standard
937 	 * preference calculation. If we have failed at it, then pick up
938 	 * where we last ended our search.
939 	 */
940 	UFS_LOCK(ump);
941 	if (ip->i_nextclustercg == -1)
942 		pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
943 	else
944 		pref = cgdata(fs, ip->i_nextclustercg);
945 	/*
946 	 * Search the block map looking for an allocation of the desired size.
947 	 * To avoid wasting too much time, we limit the number of cylinder
948 	 * groups that we will search.
949 	 */
950 	cg = dtog(fs, pref);
951 	MPASS(cg < fs->fs_ncg);
952 	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
953 		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
954 			break;
955 		cg += 1;
956 		if (cg >= fs->fs_ncg)
957 			cg = 0;
958 	}
959 	/*
960 	 * If we have failed in our search, record where we gave up for
961 	 * next time. Otherwise, fall back to our usual search criterion.
962 	 */
963 	if (newblk == 0) {
964 		ip->i_nextclustercg = cg;
965 		UFS_UNLOCK(ump);
966 		goto fail;
967 	}
968 	ip->i_nextclustercg = -1;
969 	/*
970 	 * We have found a new contiguous block.
971 	 *
972 	 * First we have to replace the old block pointers with the new
973 	 * block pointers in the inode and indirect blocks associated
974 	 * with the file.
975 	 */
976 #ifdef DIAGNOSTIC
977 	if (prtrealloc)
978 		printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number,
979 		    (intmax_t)start_lbn, (intmax_t)end_lbn);
980 #endif
981 	blkno = newblk;
982 	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
983 		if (i == ssize) {
984 			bap = ebap;
985 			soff = -i;
986 		}
987 #ifdef INVARIANTS
988 		if (!ffs_checkfreeblk(ip,
989 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
990 			panic("ffs_reallocblks: unallocated block 2");
991 		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
992 			panic("ffs_reallocblks: alloc mismatch");
993 #endif
994 #ifdef DIAGNOSTIC
995 		if (prtrealloc)
996 			printf(" %jd,", (intmax_t)*bap);
997 #endif
998 		if (DOINGSOFTDEP(vp)) {
999 			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
1000 				softdep_setup_allocdirect(ip, start_lbn + i,
1001 				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
1002 				    buflist->bs_children[i]);
1003 			else
1004 				softdep_setup_allocindir_page(ip, start_lbn + i,
1005 				    i < ssize ? sbp : ebp, soff + i, blkno,
1006 				    *bap, buflist->bs_children[i]);
1007 		}
1008 		*bap++ = blkno;
1009 	}
1010 	/*
1011 	 * Next we must write out the modified inode and indirect blocks.
1012 	 * For strict correctness, the writes should be synchronous since
1013 	 * the old block values may have been written to disk. In practice
1014 	 * they are almost never written, but if we are concerned about
1015 	 * strict correctness, the `doasyncfree' flag should be set to zero.
1016 	 *
1017 	 * The test on `doasyncfree' should be changed to test a flag
1018 	 * that shows whether the associated buffers and inodes have
1019 	 * been written. The flag should be set when the cluster is
1020 	 * started and cleared whenever the buffer or inode is flushed.
1021 	 * We can then check below to see if it is set, and do the
1022 	 * synchronous write only when it has been cleared.
1023 	 */
1024 	if (sbap != &ip->i_din2->di_db[0]) {
1025 		if (doasyncfree)
1026 			bdwrite(sbp);
1027 		else
1028 			bwrite(sbp);
1029 	} else {
1030 		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
1031 		if (!doasyncfree)
1032 			ffs_update(vp, 1);
1033 	}
1034 	if (ssize < len) {
1035 		if (doasyncfree)
1036 			bdwrite(ebp);
1037 		else
1038 			bwrite(ebp);
1039 	}
1040 	/*
1041 	 * Last, free the old blocks and assign the new blocks to the buffers.
1042 	 */
1043 #ifdef DIAGNOSTIC
1044 	if (prtrealloc)
1045 		printf("\n\tnew:");
1046 #endif
1047 	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
1048 		bp = buflist->bs_children[i];
1049 		if (!DOINGSOFTDEP(vp))
1050 			/*
1051 			 * The usual case is that a set of N-contiguous blocks
1052 			 * that was just allocated has been replaced with a
1053 			 * set of N+1-contiguous blocks. If they are marked as
1054 			 * B_DELWRI, the current contents have not been written
1055 			 * to disk. It is possible that the blocks were written
1056 			 * earlier, but very uncommon. If the blocks have never
1057 			 * been written, there is no need to send a BIO_DELETE
1058 			 * for them when they are freed. The gain from avoiding
1059 			 * the TRIMs for the common case of unwritten blocks
1060 			 * far exceeds the cost of the write amplification for
1061 			 * the uncommon case of failing to send a TRIM for the
1062 			 * blocks that had been written.
1063 			 */
1064 			ffs_blkfree(ump, fs, ump->um_devvp,
1065 			    dbtofsb(fs, bp->b_blkno),
1066 			    fs->fs_bsize, ip->i_number, vp->v_type, NULL,
1067 			    (bp->b_flags & B_DELWRI) != 0 ?
1068 			    NOTRIM_KEY : SINGLETON_KEY);
1069 		bp->b_blkno = fsbtodb(fs, blkno);
1070 #ifdef INVARIANTS
1071 		if (!ffs_checkfreeblk(ip, dbtofsb(fs, bp->b_blkno),
1072 		    fs->fs_bsize))
1073 			panic("ffs_reallocblks: unallocated block 3");
1074 #endif
1075 #ifdef DIAGNOSTIC
1076 		if (prtrealloc)
1077 			printf(" %jd,", (intmax_t)blkno);
1078 #endif
1079 	}
1080 #ifdef DIAGNOSTIC
1081 	if (prtrealloc) {
1082 		prtrealloc--;
1083 		printf("\n");
1084 	}
1085 #endif
1086 	return (0);
1087 
1088 fail:
1089 	if (ssize < len)
1090 		brelse(ebp);
1091 	if (sbap != &ip->i_din2->di_db[0])
1092 		brelse(sbp);
1093 	return (ENOSPC);
1094 }
1095 
1096 /*
1097  * Allocate an inode in the filesystem.
1098  *
1099  * If allocating a directory, use ffs_dirpref to select the inode.
1100  * If allocating in a directory, the following hierarchy is followed:
1101  *   1) allocate the preferred inode.
1102  *   2) allocate an inode in the same cylinder group.
1103  *   3) quadratically rehash into other cylinder groups, until an
1104  *      available inode is located.
1105  * If no inode preference is given the following hierarchy is used
1106  * to allocate an inode:
1107  *   1) allocate an inode in cylinder group 0.
1108  *   2) quadratically rehash into other cylinder groups, until an
1109  *      available inode is located.
1110  */
1111 int
1112 ffs_valloc(struct vnode *pvp,
1113 	int mode,
1114 	struct ucred *cred,
1115 	struct vnode **vpp)
1116 {
1117 	struct inode *pip;
1118 	struct fs *fs;
1119 	struct inode *ip;
1120 	struct timespec ts;
1121 	struct ufsmount *ump;
1122 	ino_t ino, ipref;
1123 	uint64_t cg;
1124 	int error, reclaimed;
1125 
1126 	*vpp = NULL;
1127 	pip = VTOI(pvp);
1128 	ump = ITOUMP(pip);
1129 	fs = ump->um_fs;
1130 
1131 	UFS_LOCK(ump);
1132 	reclaimed = 0;
1133 retry:
1134 	if (fs->fs_cstotal.cs_nifree == 0)
1135 		goto noinodes;
1136 
1137 	if ((mode & IFMT) == IFDIR)
1138 		ipref = ffs_dirpref(pip);
1139 	else
1140 		ipref = pip->i_number;
1141 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
1142 		ipref = 0;
1143 	cg = ino_to_cg(fs, ipref);
1144 	/*
1145 	 * Track the number of dirs created one after another
1146 	 * in the same cg without intervening files.
1147 	 */
1148 	if ((mode & IFMT) == IFDIR) {
1149 		if (fs->fs_contigdirs[cg] < 255)
1150 			fs->fs_contigdirs[cg]++;
1151 	} else {
1152 		if (fs->fs_contigdirs[cg] > 0)
1153 			fs->fs_contigdirs[cg]--;
1154 	}
1155 	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
1156 					(allocfcn_t *)ffs_nodealloccg);
1157 	if (ino == 0)
1158 		goto noinodes;
1159 	/*
1160 	 * Get rid of the cached old vnode, force allocation of a new vnode
1161 	 * for this inode. If this fails, release the allocated ino and
1162 	 * return the error.
1163 	 */
1164 	if ((error = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
1165 	    FFSV_FORCEINSMQ | FFSV_REPLACE | FFSV_NEWINODE)) != 0) {
1166 		ffs_vfree(pvp, ino, mode);
1167 		return (error);
1168 	}
1169 	/*
1170 	 * We got an inode, so check mode and panic if it is already allocated.
1171 	 */
1172 	ip = VTOI(*vpp);
1173 	if (ip->i_mode) {
1174 		printf("mode = 0%o, inum = %ju, fs = %s\n",
1175 		    ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
1176 		panic("ffs_valloc: dup alloc");
1177 	}
1178 	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
1179 		printf("free inode %s/%ju had %ld blocks\n",
1180 		    fs->fs_fsmnt, (uintmax_t)ino, (long)DIP(ip, i_blocks));
1181 		DIP_SET(ip, i_blocks, 0);
1182 	}
1183 	ip->i_flags = 0;
1184 	DIP_SET(ip, i_flags, 0);
1185 	if ((mode & IFMT) == IFDIR)
1186 		DIP_SET(ip, i_dirdepth, DIP(pip, i_dirdepth) + 1);
1187 	/*
1188 	 * Set up a new generation number for this inode.
1189 	 */
1190 	while (ip->i_gen == 0 || ++ip->i_gen == 0)
1191 		ip->i_gen = arc4random();
1192 	DIP_SET(ip, i_gen, ip->i_gen);
1193 	if (fs->fs_magic == FS_UFS2_MAGIC) {
1194 		vfs_timestamp(&ts);
1195 		ip->i_din2->di_birthtime = ts.tv_sec;
1196 		ip->i_din2->di_birthnsec = ts.tv_nsec;
1197 	}
1198 	ip->i_flag = 0;
1199 	(*vpp)->v_vflag = 0;
1200 	(*vpp)->v_type = VNON;
1201 	if (fs->fs_magic == FS_UFS2_MAGIC) {
1202 		(*vpp)->v_op = &ffs_vnodeops2;
1203 		UFS_INODE_SET_FLAG(ip, IN_UFS2);
1204 	} else {
1205 		(*vpp)->v_op = &ffs_vnodeops1;
1206 	}
1207 	return (0);
1208 noinodes:
1209 	if (reclaimed == 0) {
1210 		reclaimed = 1;
1211 		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
1212 		goto retry;
1213 	}
1214 	if (ffs_fsfail_cleanup_locked(ump, 0)) {
1215 		UFS_UNLOCK(ump);
1216 		return (ENXIO);
1217 	}
1218 	if (ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
1219 		UFS_UNLOCK(ump);
1220 		ffs_fserr(fs, pip->i_number, "out of inodes");
1221 		uprintf("\n%s: create/symlink failed, no inodes free\n",
1222 		    fs->fs_fsmnt);
1223 	} else {
1224 		UFS_UNLOCK(ump);
1225 	}
1226 	return (ENOSPC);
1227 }
1228 
1229 /*
1230  * Find a cylinder group to place a directory.
1231  *
1232  * The policy implemented by this algorithm is to allocate a
1233  * directory inode in the same cylinder group as its parent
1234  * directory, but also to reserve space for its files inodes
1235  * directory, but also to reserve space for its files' inodes
1236  * allocated one after another in the same cylinder group
1237  * without intervening allocation of files.
1238  *
1239  * If we allocate a first level directory then force allocation
1240  * in another cylinder group.
1241  */
1242 static ino_t
1243 ffs_dirpref(struct inode *pip)
1244 {
1245 	struct fs *fs;
1246 	int cg, prefcg, curcg, dirsize, cgsize;
1247 	int depth, range, start, end, numdirs, power, numerator, denominator;
1248 	uint64_t avgifree, avgbfree, avgndir, curdirsize;
1249 	uint64_t minifree, minbfree, maxndir;
1250 	uint64_t maxcontigdirs;
1251 
1252 	mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
1253 	fs = ITOFS(pip);
1254 
1255 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
1256 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1257 	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
1258 
1259 	/*
1260 	 * Select a preferred cylinder group to place a new directory.
1261 	 * If we are near the root of the filesystem we aim to spread
1262 	 * them out as much as possible. As we descend deeper from the
1263 	 * root we cluster them closer together around their parent as
1264 	 * we expect them to be more closely interactive. Higher-level
1265 	 * directories like usr/src/sys and usr/src/bin should be
1266 	 * separated while the directories in these areas are more
1267 	 * likely to be accessed together so should be closer.
1268 	 *
1269 	 * We pick a range of cylinder groups around the cylinder group
1270 	 * of the directory in which we are being created. The size of
1271 	 * the range for our search is based on our depth from the root
1272 	 * of our filesystem. We then probe that range based on how many
1273 	 * directories are already present. The first new directory is at
1274 	 * 1/2 (middle) of the range; the second is in the first 1/4 of the
1275 	 * range, then at 3/4, 1/8, 3/8, 5/8, 7/8, 1/16, 3/16, 5/16, etc.
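	 * As a worked example: when numdirs (i_effnlink - 1, computed below)
	 * is 5, fls(5) = 3, numerator = (5 & ~4) * 2 + 1 = 3 and
	 * denominator = 1 << 3 = 8, so the probe lands 3/8 of the way
	 * through the range, the fifth entry in the sequence above.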
1276 	 */
1277 	depth = DIP(pip, i_dirdepth);
1278 	range = fs->fs_ncg / (1 << depth);
1279 	curcg = ino_to_cg(fs, pip->i_number);
1280 	start = curcg - (range / 2);
1281 	if (start < 0)
1282 		start += fs->fs_ncg;
1283 	end = curcg + (range / 2);
1284 	if (end >= fs->fs_ncg)
1285 		end -= fs->fs_ncg;
1286 	numdirs = pip->i_effnlink - 1;
1287 	power = fls(numdirs);
1288 	numerator = (numdirs & ~(1 << (power - 1))) * 2 + 1;
1289 	denominator = 1 << power;
1290 	prefcg = (curcg - (range / 2) + (range * numerator / denominator));
1291 	if (prefcg < 0)
1292 		prefcg += fs->fs_ncg;
1293 	if (prefcg >= fs->fs_ncg)
1294 		prefcg -= fs->fs_ncg;
1295 	/*
1296 	 * If this filesystem is not tracking directory depths,
1297 	 * revert to the old algorithm.
1298 	 */
1299 	if (depth == 0 && pip->i_number != UFS_ROOTINO)
1300 		prefcg = curcg;
1301 
1302 	/*
1303 	 * Count the various limits used for optimal
1304 	 * allocation of a directory inode.
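	 * As a rough sketch, assuming the usual newfs defaults of
	 * fs_avgfilesize = 16384 and fs_avgfpdir = 64, dirsize starts at
	 * 1 MB, so maxcontigdirs below allows roughly one new directory
	 * per megabyte of average free space in a cylinder group
	 * (capped at 255).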
1305 	 */
1306 	maxndir = min(avgndir + (1 << depth), fs->fs_ipg);
1307 	minifree = avgifree - avgifree / 4;
1308 	if (minifree < 1)
1309 		minifree = 1;
1310 	minbfree = avgbfree - avgbfree / 4;
1311 	if (minbfree < 1)
1312 		minbfree = 1;
1313 	cgsize = fs->fs_fsize * fs->fs_fpg;
1314 	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
1315 	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
1316 	if (dirsize < curdirsize)
1317 		dirsize = curdirsize;
1318 	if (dirsize <= 0)
1319 		maxcontigdirs = 0;		/* dirsize overflowed */
1320 	else
1321 		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
1322 	if (fs->fs_avgfpdir > 0)
1323 		maxcontigdirs = min(maxcontigdirs,
1324 				    fs->fs_ipg / fs->fs_avgfpdir);
1325 	if (maxcontigdirs == 0)
1326 		maxcontigdirs = 1;
1327 
1328 	/*
1329 	 * Limit number of dirs in one cg and reserve space for
1330 	 * regular files, but only if we have no deficit in
1331 	 * inodes or space.
1332 	 *
1333 	 * We are trying to find a suitable cylinder group nearby
1334 	 * our preferred cylinder group to place a new directory.
1335 	 * We scan from our preferred cylinder group forward looking
1336 	 * for a cylinder group that meets our criterion. If we get
1337 	 * to the final cylinder group and do not find anything,
1338 	 * we start scanning forwards from the beginning of the
1339 	 * filesystem. While it might seem sensible to start scanning
1340 	 * backwards or even to alternate looking forward and backward,
1341 	 * this approach fails badly when the filesystem is nearly full.
1342 	 * Specifically, we first search all the areas that have no space
1343 	 * and finally try the one preceding that. We repeat this on
1344 	 * every request and in the case of the final block end up
1345 	 * searching the entire filesystem. By jumping to the front
1346 	 * of the filesystem, our future forward searches always look
1347 	 * in new cylinder groups and so find every possible block after
1348 	 * one pass over the filesystem.
1349 	 */
1350 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
1351 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1352 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1353 		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1354 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
1355 				return ((ino_t)(fs->fs_ipg * cg));
1356 		}
1357 	for (cg = 0; cg < prefcg; cg++)
1358 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1359 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1360 		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1361 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
1362 				return ((ino_t)(fs->fs_ipg * cg));
1363 		}
1364 	/*
1365 	 * This is a backstop when we have deficit in space.
1366 	 */
1367 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
1368 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1369 			return ((ino_t)(fs->fs_ipg * cg));
1370 	for (cg = 0; cg < prefcg; cg++)
1371 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1372 			break;
1373 	return ((ino_t)(fs->fs_ipg * cg));
1374 }
1375 
1376 /*
1377  * Select the desired position for the next block in a file.  The file is
1378  * logically divided into sections. The first section is composed of the
1379  * direct blocks and the next fs_maxbpg blocks. Each additional section
1380  * contains fs_maxbpg blocks.
1381  *
1382  * If no blocks have been allocated in the first section, the policy is to
1383  * request a block in the same cylinder group as the inode that describes
1384  * the file. The first indirect is allocated immediately following the last
1385  * direct block and the data blocks for the first indirect immediately
1386  * follow it.
1387  *
1388  * If no blocks have been allocated in any other section, the indirect
1389  * block(s) are allocated in the same cylinder group as its inode in an
1390  * area reserved immediately following the inode blocks. The policy for
1391  * the data blocks is to place them in a cylinder group with a greater than
1392  * average number of free blocks. An appropriate cylinder group is found
1393  * by using a rotor that sweeps the cylinder groups. When a new group of
1394  * blocks is needed, the sweep begins in the cylinder group following the
1395  * cylinder group from which the previous allocation was made. The sweep
1396  * continues until a cylinder group with greater than the average number
1397  * of free blocks is found. If the allocation is for the first block in an
1398  * indirect block or the previous block is a hole, then the information on
1399  * the previous allocation is unavailable; here a best guess is made based
1400  * on the logical block number being allocated.
1401  *
1402  * If a section is already partially allocated, the policy is to
1403  * allocate blocks contiguously within the section if possible.
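 * As a concrete sketch (assuming the common newfs defaults of 32 kB
 * blocks and fs_maxbpg = fs_bsize / sizeof(ufs2_daddr_t) = 4096), each
 * section after the first covers 128 MB of file data before the
 * allocator moves on to a new cylinder group.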
1404  */
1405 ufs2_daddr_t
1406 ffs_blkpref_ufs1(struct inode *ip,
1407 	ufs_lbn_t lbn,
1408 	int indx,
1409 	ufs1_daddr_t *bap)
1410 {
1411 	struct fs *fs;
1412 	uint64_t cg, inocg;
1413 	uint64_t avgbfree, startcg;
1414 	ufs2_daddr_t pref, prevbn;
1415 
1416 	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1417 	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1418 	fs = ITOFS(ip);
1419 	/*
1420 	 * Allocation of indirect blocks is indicated by passing negative
1421 	 * values in indx: -1 for single indirect, -2 for double indirect,
1422 	 * -3 for triple indirect. As noted below, we attempt to allocate
1423 	 * the first indirect inline with the file data. For all later
1424 	 * indirect blocks, the data is often allocated in other cylinder
1425 	 * groups. However to speed random file access and to speed up
1426 	 * fsck, the filesystem reserves the first fs_metaspace blocks
1427 	 * (typically half of fs_minfree) of the data area of each cylinder
1428 	 * group to hold these later indirect blocks.
1429 	 */
1430 	inocg = ino_to_cg(fs, ip->i_number);
1431 	if (indx < 0) {
1432 		/*
1433 		 * Our preference for indirect blocks is the zone at the
1434 		 * beginning of the inode's cylinder group data area that
1435 		 * we try to reserve for indirect blocks.
1436 		 */
1437 		pref = cgmeta(fs, inocg);
1438 		/*
1439 		 * If we are allocating the first indirect block, try to
1440 		 * place it immediately following the last direct block.
1441 		 */
1442 		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
1443 		    ip->i_din1->di_db[UFS_NDADDR - 1] != 0) {
1444 			pref = ip->i_din1->di_db[UFS_NDADDR - 1] + fs->fs_frag;
1445 			if (dtog(fs, pref) >= fs->fs_ncg)
1446 				pref = 0;
1447 		}
1448 		return (pref);
1449 	}
1450 	/*
1451 	 * If we are allocating the first data block in the first indirect
1452 	 * block and the indirect has been allocated in the data block area,
1453 	 * try to place it immediately following the indirect block.
1454 	 */
1455 	if (lbn == UFS_NDADDR) {
1456 		pref = ip->i_din1->di_ib[0];
1457 		if (pref != 0 && pref >= cgdata(fs, inocg) &&
1458 		    pref < cgbase(fs, inocg + 1)) {
1459 			if (dtog(fs, pref + fs->fs_frag) >= fs->fs_ncg)
1460 				return (0);
1461 			return (pref + fs->fs_frag);
1462 		}
1463 	}
1464 	/*
1465 	 * If we are at the beginning of a file, or we have already allocated
1466 	 * the maximum number of blocks per cylinder group, or we do not
1467 	 * have a block allocated immediately preceding us, then we need
1468 	 * to decide where to start allocating new blocks.
1469 	 */
1470 	if (indx ==  0) {
1471 		prevbn = 0;
1472 	} else {
1473 		prevbn = bap[indx - 1];
1474 		if (UFS_CHECK_BLKNO(ITOVFS(ip), ip->i_number, prevbn,
1475 		    fs->fs_bsize) != 0)
1476 			prevbn = 0;
1477 	}
1478 	if (indx % fs->fs_maxbpg == 0 || prevbn == 0) {
1479 		/*
1480 		 * If we are allocating a directory data block, we want
1481 		 * to place it in the metadata area.
1482 		 */
1483 		if ((ip->i_mode & IFMT) == IFDIR)
1484 			return (cgmeta(fs, inocg));
1485 		/*
1486 		 * Until we fill all the direct and all the first indirect's
1487 		 * blocks, we try to allocate in the data area of the inode's
1488 		 * cylinder group.
1489 		 */
1490 		if (lbn < UFS_NDADDR + NINDIR(fs))
1491 			return (cgdata(fs, inocg));
1492 		/*
1493 		 * Find a cylinder with greater than average number of
1494 		 * unused data blocks.
1495 		 */
1496 		if (indx == 0 || prevbn == 0)
1497 			startcg = inocg + lbn / fs->fs_maxbpg;
1498 		else
1499 			startcg = dtog(fs, prevbn) + 1;
1500 		startcg %= fs->fs_ncg;
1501 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1502 		for (cg = startcg; cg < fs->fs_ncg; cg++)
1503 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1504 				fs->fs_cgrotor = cg;
1505 				return (cgdata(fs, cg));
1506 			}
1507 		for (cg = 0; cg < startcg; cg++)
1508 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1509 				fs->fs_cgrotor = cg;
1510 				return (cgdata(fs, cg));
1511 			}
1512 		return (0);
1513 	}
1514 	/*
1515 	 * Otherwise, we just always try to lay things out contiguously.
1516 	 */
1517 	if (dtog(fs, prevbn + fs->fs_frag) >= fs->fs_ncg)
1518 		return (0);
1519 	return (prevbn + fs->fs_frag);
1520 }
1521 
1522 /*
1523  * Same as above, but for UFS2
1524  */
1525 ufs2_daddr_t
1526 ffs_blkpref_ufs2(struct inode *ip,
1527 	ufs_lbn_t lbn,
1528 	int indx,
1529 	ufs2_daddr_t *bap)
1530 {
1531 	struct fs *fs;
1532 	uint64_t cg, inocg;
1533 	uint64_t avgbfree, startcg;
1534 	ufs2_daddr_t pref, prevbn;
1535 
1536 	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1537 	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1538 	fs = ITOFS(ip);
1539 	/*
1540 	 * Allocation of indirect blocks is indicated by passing negative
1541 	 * values in indx: -1 for single indirect, -2 for double indirect,
1542 	 * -3 for triple indirect. As noted below, we attempt to allocate
1543 	 * the first indirect inline with the file data. For all later
1544 	 * indirect blocks, the data is often allocated in other cylinder
1545 	 * groups. However to speed random file access and to speed up
1546 	 * fsck, the filesystem reserves the first fs_metaspace blocks
1547 	 * (typically half of fs_minfree) of the data area of each cylinder
1548 	 * group to hold these later indirect blocks.
1549 	 */
1550 	inocg = ino_to_cg(fs, ip->i_number);
1551 	if (indx < 0) {
1552 		/*
1553 		 * Our preference for indirect blocks is the zone at the
1554 		 * beginning of the inode's cylinder group data area that
1555 		 * we try to reserve for indirect blocks.
1556 		 */
1557 		pref = cgmeta(fs, inocg);
1558 		/*
1559 		 * If we are allocating the first indirect block, try to
1560 		 * place it immediately following the last direct block.
1561 		 */
1562 		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
1563 		    ip->i_din2->di_db[UFS_NDADDR - 1] != 0) {
1564 			pref = ip->i_din2->di_db[UFS_NDADDR - 1] + fs->fs_frag;
1565 			if (dtog(fs, pref) >= fs->fs_ncg)
1566 				pref = 0;
1567 		}
1568 		return (pref);
1569 	}
1570 	/*
1571 	 * If we are allocating the first data block in the first indirect
1572 	 * block and the indirect has been allocated in the data block area,
1573 	 * try to place it immediately following the indirect block.
1574 	 */
1575 	if (lbn == UFS_NDADDR) {
1576 		pref = ip->i_din2->di_ib[0];
1577 		if (pref != 0 && pref >= cgdata(fs, inocg) &&
1578 		    pref < cgbase(fs, inocg + 1)) {
1579 			if (dtog(fs, pref + fs->fs_frag) >= fs->fs_ncg)
1580 				return (0);
1581 			return (pref + fs->fs_frag);
1582 		}
1583 	}
1584 	/*
1585 	 * If we are at the beginning of a file, or we have already allocated
1586 	 * the maximum number of blocks per cylinder group, or we do not
1587 	 * have a block allocated immediately preceding us, then we need
1588 	 * to decide where to start allocating new blocks.
1589 	 */
1590 	if (indx ==  0) {
1591 		prevbn = 0;
1592 	} else {
1593 		prevbn = bap[indx - 1];
1594 		if (UFS_CHECK_BLKNO(ITOVFS(ip), ip->i_number, prevbn,
1595 		    fs->fs_bsize) != 0)
1596 			prevbn = 0;
1597 	}
1598 	if (indx % fs->fs_maxbpg == 0 || prevbn == 0) {
1599 		/*
1600 		 * If we are allocating a directory data block, we want
1601 		 * to place it in the metadata area.
1602 		 */
1603 		if ((ip->i_mode & IFMT) == IFDIR)
1604 			return (cgmeta(fs, inocg));
1605 		/*
1606 		 * Until all the direct blocks and the first indirect's blocks
1607 		 * have been filled, we try to allocate in the data area of the
1608 		 * inode's cylinder group.
1609 		 */
1610 		if (lbn < UFS_NDADDR + NINDIR(fs))
1611 			return (cgdata(fs, inocg));
1612 		/*
1613 		 * Find a cylinder with greater than average number of
1614 		 * unused data blocks.
1615 		 */
1616 		if (indx == 0 || prevbn == 0)
1617 			startcg = inocg + lbn / fs->fs_maxbpg;
1618 		else
1619 			startcg = dtog(fs, prevbn) + 1;
1620 		startcg %= fs->fs_ncg;
1621 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1622 		for (cg = startcg; cg < fs->fs_ncg; cg++)
1623 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1624 				fs->fs_cgrotor = cg;
1625 				return (cgdata(fs, cg));
1626 			}
1627 		for (cg = 0; cg < startcg; cg++)
1628 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1629 				fs->fs_cgrotor = cg;
1630 				return (cgdata(fs, cg));
1631 			}
1632 		return (0);
1633 	}
1634 	/*
1635 	 * Otherwise, we always try to lay things out contiguously.
1636 	 */
1637 	if (dtog(fs, prevbn + fs->fs_frag) >= fs->fs_ncg)
1638 		return (0);
1639 	return (prevbn + fs->fs_frag);
1640 }
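
/*
 * Illustrative sketch: how the negative "indx" convention used by the two
 * functions above can be turned into a placement preference.  Only the
 * single indirect (indx == -1) is kept next to the file data; the double
 * and triple indirects (-2, -3) go to the metadata zone reserved at the
 * front of the cylinder group's data area.  The kernel code additionally
 * requires that the file still fits within the first indirect's range.
 * The names pick_indirect_pref, metazone, lastdirect, fragsperblk, and
 * fslimit (the first fragment number past the last cylinder group) are
 * hypothetical, and the block is under "#if 0" so it is never compiled.
 */
#if 0
static int64_t
pick_indirect_pref(int indx, int64_t metazone, int64_t lastdirect,
    int64_t fragsperblk, int64_t fslimit)
{
	int64_t pref;

	/* Default preference: the reserved metadata zone. */
	pref = metazone;
	/*
	 * The single indirect is placed right after the last direct
	 * block when that block exists and the next slot still falls
	 * inside the filesystem; otherwise express no preference.
	 */
	if (indx == -1 && lastdirect != 0) {
		pref = lastdirect + fragsperblk;
		if (pref >= fslimit)
			pref = 0;
	}
	return (pref);
}
#endif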
1641 
1642 /*
1643  * Implement the cylinder overflow algorithm.
1644  *
1645  * The policy implemented by this algorithm is:
1646  *   1) allocate the block in its requested cylinder group.
1647  *   2) quadratically rehash on the cylinder group number.
1648  *   3) brute force search for a free block.
1649  *
1650  * Must be called with the UFS lock held.  Will release the lock on success
1651  * and return with it held on failure.
1652  */
1653 /*VARARGS5*/
1654 static ufs2_daddr_t
1655 ffs_hashalloc(struct inode *ip,
1656 	uint64_t cg,
1657 	ufs2_daddr_t pref,
1658 	int size,	/* Search size for data blocks, mode for inodes */
1659 	int rsize,	/* Real allocated size. */
1660 	allocfcn_t *allocator)
1661 {
1662 	struct fs *fs;
1663 	ufs2_daddr_t result;
1664 	uint64_t i, icg = cg;
1665 
1666 	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1667 #ifdef INVARIANTS
1668 	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
1669 		panic("ffs_hashalloc: allocation on suspended filesystem");
1670 #endif
1671 	fs = ITOFS(ip);
1672 	/*
1673 	 * 1: preferred cylinder group
1674 	 */
1675 	result = (*allocator)(ip, cg, pref, size, rsize);
1676 	if (result)
1677 		return (result);
1678 	/*
1679 	 * 2: quadratic rehash
1680 	 */
1681 	for (i = 1; i < fs->fs_ncg; i *= 2) {
1682 		cg += i;
1683 		if (cg >= fs->fs_ncg)
1684 			cg -= fs->fs_ncg;
1685 		result = (*allocator)(ip, cg, 0, size, rsize);
1686 		if (result)
1687 			return (result);
1688 	}
1689 	/*
1690 	 * 3: brute force search
1691 	 * Note that we start at i == 2, since 0 was checked initially,
1692 	 * and 1 is always checked in the quadratic rehash.
1693 	 */
1694 	cg = (icg + 2) % fs->fs_ncg;
1695 	for (i = 2; i < fs->fs_ncg; i++) {
1696 		result = (*allocator)(ip, cg, 0, size, rsize);
1697 		if (result)
1698 			return (result);
1699 		cg++;
1700 		if (cg == fs->fs_ncg)
1701 			cg = 0;
1702 	}
1703 	return (0);
1704 }
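
/*
 * Illustrative sketch: the order in which ffs_hashalloc() visits cylinder
 * groups.  After the preferred group, the quadratic rehash probes groups
 * at offsets 1, 3, 7, 15, ... from the start, and the final brute-force
 * pass walks every remaining group beginning two past the original one.
 * The helper below only records that probe order into a caller-supplied
 * array (large enough for roughly ncg + log2(ncg) + 1 entries); it is a
 * hypothetical model kept under "#if 0", not kernel code.
 */
#if 0
static int
hashalloc_probe_order(uint64_t icg, uint64_t ncg, uint64_t *order)
{
	uint64_t cg, i;
	int n;

	n = 0;
	cg = icg;
	order[n++] = cg;		/* 1: the preferred group itself */
	for (i = 1; i < ncg; i *= 2) {	/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		order[n++] = cg;
	}
	cg = (icg + 2) % ncg;		/* 3: brute force over the rest */
	for (i = 2; i < ncg; i++) {
		order[n++] = cg;
		if (++cg == ncg)
			cg = 0;
	}
	return (n);
}
#endif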
1705 
1706 /*
1707  * Determine whether a fragment can be extended.
1708  *
1709  * Check to see if the necessary fragments are available, and
1710  * if they are, allocate them.
1711  */
1712 static ufs2_daddr_t
1713 ffs_fragextend(struct inode *ip,
1714 	uint64_t cg,
1715 	ufs2_daddr_t bprev,
1716 	int osize,
1717 	int nsize)
1718 {
1719 	struct fs *fs;
1720 	struct cg *cgp;
1721 	struct buf *bp;
1722 	struct ufsmount *ump;
1723 	int nffree;
1724 	long bno;
1725 	int frags, bbase;
1726 	int i, error;
1727 	uint8_t *blksfree;
1728 
1729 	ump = ITOUMP(ip);
1730 	fs = ump->um_fs;
1731 	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1732 		return (0);
1733 	frags = numfrags(fs, nsize);
1734 	bbase = fragnum(fs, bprev);
1735 	if (bbase > fragnum(fs, (bprev + frags - 1))) {
1736 		/* cannot extend across a block boundary */
1737 		return (0);
1738 	}
1739 	UFS_UNLOCK(ump);
1740 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0) {
1741 		ffs_checkcgintegrity(fs, cg, error);
1742 		goto fail;
1743 	}
1744 	bno = dtogd(fs, bprev);
1745 	blksfree = cg_blksfree(cgp);
1746 	for (i = numfrags(fs, osize); i < frags; i++)
1747 		if (isclr(blksfree, bno + i))
1748 			goto fail;
1749 	/*
1750 	 * the current fragment can be extended
1751 	 * deduct the count on the fragment being extended into
1752 	 * increase the count on the remaining fragment (if any)
1753 	 * allocate the extended piece
1754 	 */
1755 	for (i = frags; i < fs->fs_frag - bbase; i++)
1756 		if (isclr(blksfree, bno + i))
1757 			break;
1758 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
1759 	if (i != frags)
1760 		cgp->cg_frsum[i - frags]++;
1761 	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1762 		clrbit(blksfree, bno + i);
1763 		cgp->cg_cs.cs_nffree--;
1764 		nffree++;
1765 	}
1766 	UFS_LOCK(ump);
1767 	fs->fs_cstotal.cs_nffree -= nffree;
1768 	fs->fs_cs(fs, cg).cs_nffree -= nffree;
1769 	fs->fs_fmod = 1;
1770 	ACTIVECLEAR(fs, cg);
1771 	UFS_UNLOCK(ump);
1772 	if (DOINGSOFTDEP(ITOV(ip)))
1773 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1774 		    frags, numfrags(fs, osize));
1775 	bdwrite(bp);
1776 	return (bprev);
1777 
1778 fail:
1779 	brelse(bp);
1780 	UFS_LOCK(ump);
1781 	return (0);
1782 
1783 }
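
/*
 * Illustrative sketch: the free-map test that decides whether an existing
 * fragment run can be grown in place.  The fragments already owned occupy
 * [bno, bno + oldfrags); the extension succeeds only if every fragment in
 * [bno + oldfrags, bno + newfrags) is still marked free.  The name
 * can_extend_frag is hypothetical; the bit test mirrors the isclr() macro
 * and the block is under "#if 0" so it is never compiled.
 */
#if 0
static int
can_extend_frag(const uint8_t *blksfree, long bno, int oldfrags, int newfrags)
{
	int i;

	for (i = oldfrags; i < newfrags; i++)
		if ((blksfree[(bno + i) / 8] & (1 << ((bno + i) % 8))) == 0)
			return (0);	/* an intervening fragment is in use */
	return (1);			/* the whole extension is free */
}
#endif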
1784 
1785 /*
1786  * Determine whether a block can be allocated.
1787  *
1788  * Check to see if a block of the appropriate size is available,
1789  * and if it is, allocate it.
1790  */
1791 static ufs2_daddr_t
1792 ffs_alloccg(struct inode *ip,
1793 	uint64_t cg,
1794 	ufs2_daddr_t bpref,
1795 	int size,
1796 	int rsize)
1797 {
1798 	struct fs *fs;
1799 	struct cg *cgp;
1800 	struct buf *bp;
1801 	struct ufsmount *ump;
1802 	ufs1_daddr_t bno;
1803 	ufs2_daddr_t blkno;
1804 	int i, allocsiz, error, frags;
1805 	uint8_t *blksfree;
1806 
1807 	ump = ITOUMP(ip);
1808 	fs = ump->um_fs;
1809 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1810 		return (0);
1811 	UFS_UNLOCK(ump);
1812 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0 ||
1813 	   (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
1814 		ffs_checkcgintegrity(fs, cg, error);
1815 		goto fail;
1816 	}
1817 	if (size == fs->fs_bsize) {
1818 		UFS_LOCK(ump);
1819 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1820 		ACTIVECLEAR(fs, cg);
1821 		UFS_UNLOCK(ump);
1822 		bdwrite(bp);
1823 		return (blkno);
1824 	}
1825 	/*
1826 	 * check to see if any fragments are already available
1827 	 * allocsiz is the size which will be allocated, hacking
1828 	 * it down to a smaller size if necessary
1829 	 */
1830 	blksfree = cg_blksfree(cgp);
1831 	frags = numfrags(fs, size);
1832 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1833 		if (cgp->cg_frsum[allocsiz] != 0)
1834 			break;
1835 	if (allocsiz == fs->fs_frag) {
1836 		/*
1837 		 * no fragments were available, so a block will be
1838 		 * allocated, and hacked up
1839 		 */
1840 		if (cgp->cg_cs.cs_nbfree == 0)
1841 			goto fail;
1842 		UFS_LOCK(ump);
1843 		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1844 		ACTIVECLEAR(fs, cg);
1845 		UFS_UNLOCK(ump);
1846 		bdwrite(bp);
1847 		return (blkno);
1848 	}
1849 	KASSERT(size == rsize,
1850 	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1851 	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1852 	if (bno < 0)
1853 		goto fail;
1854 	for (i = 0; i < frags; i++)
1855 		clrbit(blksfree, bno + i);
1856 	cgp->cg_cs.cs_nffree -= frags;
1857 	cgp->cg_frsum[allocsiz]--;
1858 	if (frags != allocsiz)
1859 		cgp->cg_frsum[allocsiz - frags]++;
1860 	UFS_LOCK(ump);
1861 	fs->fs_cstotal.cs_nffree -= frags;
1862 	fs->fs_cs(fs, cg).cs_nffree -= frags;
1863 	fs->fs_fmod = 1;
1864 	blkno = cgbase(fs, cg) + bno;
1865 	ACTIVECLEAR(fs, cg);
1866 	UFS_UNLOCK(ump);
1867 	if (DOINGSOFTDEP(ITOV(ip)))
1868 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1869 	bdwrite(bp);
1870 	return (blkno);
1871 
1872 fail:
1873 	brelse(bp);
1874 	UFS_LOCK(ump);
1875 	return (0);
1876 }
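
/*
 * Illustrative sketch: the cg_frsum[] scan performed above.  frsum[n]
 * counts the free runs of exactly n fragments in the cylinder group, so
 * picking the smallest run that still fits leaves the larger runs intact
 * for larger requests; if nothing fits, a full block has to be broken up.
 * The name smallest_frag_run is hypothetical and the block is under
 * "#if 0" so it is never compiled.
 */
#if 0
static int
smallest_frag_run(const int32_t *frsum, int fragsperblk, int needed)
{
	int allocsiz;

	for (allocsiz = needed; allocsiz < fragsperblk; allocsiz++)
		if (frsum[allocsiz] != 0)
			return (allocsiz);	/* best-fit run size */
	return (fragsperblk);			/* fall back to a full block */
}
#endif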
1877 
1878 /*
1879  * Allocate a block in a cylinder group.
1880  *
1881  * This algorithm implements the following policy:
1882  *   1) allocate the requested block.
1883  *   2) allocate a rotationally optimal block in the same cylinder.
1884  *   3) allocate the next available block on the block rotor for the
1885  *      specified cylinder group.
1886  * Note that this routine only allocates fs_bsize blocks; these
1887  * blocks may be fragmented by the routine that allocates them.
1888  */
1889 static ufs2_daddr_t
1890 ffs_alloccgblk(struct inode *ip,
1891 	struct buf *bp,
1892 	ufs2_daddr_t bpref,
1893 	int size)
1894 {
1895 	struct fs *fs;
1896 	struct cg *cgp;
1897 	struct ufsmount *ump;
1898 	ufs1_daddr_t bno;
1899 	ufs2_daddr_t blkno;
1900 	uint8_t *blksfree;
1901 	int i, cgbpref;
1902 
1903 	ump = ITOUMP(ip);
1904 	fs = ump->um_fs;
1905 	mtx_assert(UFS_MTX(ump), MA_OWNED);
1906 	cgp = (struct cg *)bp->b_data;
1907 	blksfree = cg_blksfree(cgp);
1908 	if (bpref == 0) {
1909 		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1910 	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1911 		/* map bpref to correct zone in this cg */
1912 		if (bpref < cgdata(fs, cgbpref))
1913 			bpref = cgmeta(fs, cgp->cg_cgx);
1914 		else
1915 			bpref = cgdata(fs, cgp->cg_cgx);
1916 	}
1917 	/*
1918 	 * if the requested block is available, use it
1919 	 */
1920 	bno = dtogd(fs, blknum(fs, bpref));
1921 	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1922 		goto gotit;
1923 	/*
1924 	 * Take the next available block in this cylinder group.
1925 	 */
1926 	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1927 	if (bno < 0)
1928 		return (0);
1929 	/* Update cg_rotor only if allocated from the data zone */
1930 	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1931 		cgp->cg_rotor = bno;
1932 gotit:
1933 	blkno = fragstoblks(fs, bno);
1934 	ffs_clrblock(fs, blksfree, (long)blkno);
1935 	ffs_clusteracct(fs, cgp, blkno, -1);
1936 	cgp->cg_cs.cs_nbfree--;
1937 	fs->fs_cstotal.cs_nbfree--;
1938 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1939 	fs->fs_fmod = 1;
1940 	blkno = cgbase(fs, cgp->cg_cgx) + bno;
1941 	/*
1942 	 * If the caller didn't want the whole block free the frags here.
1943 	 */
1944 	size = numfrags(fs, size);
1945 	if (size != fs->fs_frag) {
1946 		bno = dtogd(fs, blkno);
1947 		for (i = size; i < fs->fs_frag; i++)
1948 			setbit(blksfree, bno + i);
1949 		i = fs->fs_frag - size;
1950 		cgp->cg_cs.cs_nffree += i;
1951 		fs->fs_cstotal.cs_nffree += i;
1952 		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1953 		fs->fs_fmod = 1;
1954 		cgp->cg_frsum[i]++;
1955 	}
1956 	/* XXX Fixme. */
1957 	UFS_UNLOCK(ump);
1958 	if (DOINGSOFTDEP(ITOV(ip)))
1959 		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, size, 0);
1960 	UFS_LOCK(ump);
1961 	return (blkno);
1962 }
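
/*
 * Illustrative sketch: what happens to the tail of a full block when the
 * caller asked for fewer fragments than a block holds.  The unused tail is
 * marked free again, and its length is what the code above credits back to
 * cs_nffree and cg_frsum[].  The name release_block_tail is hypothetical,
 * the bit set mirrors setbit(), and the block is under "#if 0".
 */
#if 0
static int
release_block_tail(uint8_t *blksfree, long bno, int fragsperblk, int wanted)
{
	int i;

	for (i = wanted; i < fragsperblk; i++)
		blksfree[(bno + i) / 8] |= 1 << ((bno + i) % 8);
	return (fragsperblk - wanted);	/* fragments given back */
}
#endif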
1963 
1964 /*
1965  * Determine whether a cluster can be allocated.
1966  *
1967  * We do not currently check for optimal rotational layout if there
1968  * are multiple choices in the same cylinder group. Instead we just
1969  * take the first one that we find following bpref.
1970  */
1971 static ufs2_daddr_t
1972 ffs_clusteralloc(struct inode *ip,
1973 	uint64_t cg,
1974 	ufs2_daddr_t bpref,
1975 	int len)
1976 {
1977 	struct fs *fs;
1978 	struct cg *cgp;
1979 	struct buf *bp;
1980 	struct ufsmount *ump;
1981 	int i, run, bit, map, got, error;
1982 	ufs2_daddr_t bno;
1983 	uint8_t *mapp;
1984 	int32_t *lp;
1985 	uint8_t *blksfree;
1986 
1987 	ump = ITOUMP(ip);
1988 	fs = ump->um_fs;
1989 	MPASS(cg < fs->fs_ncg);
1990 	if (fs->fs_maxcluster[cg] < len)
1991 		return (0);
1992 	UFS_UNLOCK(ump);
1993 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0) {
1994 		ffs_checkcgintegrity(fs, cg, error);
1995 		UFS_LOCK(ump);
1996 		return (0);
1997 	}
1998 	/*
1999 	 * Check to see if a cluster of the needed size (or bigger) is
2000 	 * available in this cylinder group.
2001 	 */
2002 	lp = &cg_clustersum(cgp)[len];
2003 	for (i = len; i <= fs->fs_contigsumsize; i++)
2004 		if (*lp++ > 0)
2005 			break;
2006 	if (i > fs->fs_contigsumsize) {
2007 		/*
2008 		 * This is the first time looking for a cluster in this
2009 		 * cylinder group. Update the cluster summary information
2010 		 * to reflect the true maximum sized cluster so that
2011 		 * future cluster allocation requests can avoid reading
2012 		 * the cylinder group map only to find no clusters.
2013 		 */
2014 		lp = &cg_clustersum(cgp)[len - 1];
2015 		for (i = len - 1; i > 0; i--)
2016 			if (*lp-- > 0)
2017 				break;
2018 		UFS_LOCK(ump);
2019 		fs->fs_maxcluster[cg] = i;
2020 		brelse(bp);
2021 		return (0);
2022 	}
2023 	/*
2024 	 * Search the cluster map to find a big enough cluster.
2025 	 * We take the first one that we find, even if it is larger
2026 	 * than we need as we prefer to get one close to the previous
2027 	 * block allocation. We do not search before the current
2028 	 * preference point as we do not want to allocate a block
2029 	 * that is allocated before the previous one (as we will
2030 	 * then have to wait for another pass of the elevator
2031 	 * algorithm before it will be read). We prefer to fail and
2032 	 * be recalled to try an allocation in the next cylinder group.
2033 	 */
2034 	if (dtog(fs, bpref) != cg)
2035 		bpref = cgdata(fs, cg);
2036 	else
2037 		bpref = blknum(fs, bpref);
2038 	bpref = fragstoblks(fs, dtogd(fs, bpref));
2039 	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
2040 	map = *mapp++;
2041 	bit = 1 << (bpref % NBBY);
2042 	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
2043 		if ((map & bit) == 0) {
2044 			run = 0;
2045 		} else {
2046 			run++;
2047 			if (run == len)
2048 				break;
2049 		}
2050 		if ((got & (NBBY - 1)) != (NBBY - 1)) {
2051 			bit <<= 1;
2052 		} else {
2053 			map = *mapp++;
2054 			bit = 1;
2055 		}
2056 	}
2057 	if (got >= cgp->cg_nclusterblks) {
2058 		UFS_LOCK(ump);
2059 		brelse(bp);
2060 		return (0);
2061 	}
2062 	/*
2063 	 * Allocate the cluster that we have found.
2064 	 */
2065 	blksfree = cg_blksfree(cgp);
2066 	for (i = 1; i <= len; i++)
2067 		if (!ffs_isblock(fs, blksfree, got - run + i))
2068 			panic("ffs_clusteralloc: map mismatch");
2069 	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
2070 	if (dtog(fs, bno) != cg)
2071 		panic("ffs_clusteralloc: allocated out of group");
2072 	len = blkstofrags(fs, len);
2073 	UFS_LOCK(ump);
2074 	for (i = 0; i < len; i += fs->fs_frag)
2075 		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
2076 			panic("ffs_clusteralloc: lost block");
2077 	ACTIVECLEAR(fs, cg);
2078 	UFS_UNLOCK(ump);
2079 	bdwrite(bp);
2080 	return (bno);
2081 }
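
/*
 * Illustrative sketch: the run search performed over the per-cg cluster map
 * above, where each bit represents one full block (1 == free).  The scan
 * starts at the preferred block and returns the first run of "len"
 * consecutive free blocks, or -1 so the caller can move on to the next
 * cylinder group.  The name find_cluster_run is hypothetical and the block
 * is under "#if 0" so it is never compiled.
 */
#if 0
static int64_t
find_cluster_run(const uint8_t *clustermap, int64_t nblks, int64_t start,
    int len)
{
	int64_t blk;
	int run;

	run = 0;
	for (blk = start; blk < nblks; blk++) {
		if (clustermap[blk / 8] & (1 << (blk % 8))) {
			if (++run == len)
				return (blk - len + 1);	/* start of the run */
		} else
			run = 0;
	}
	return (-1);
}
#endif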
2082 
2083 static inline struct buf *
2084 getinobuf(struct inode *ip,
2085 	uint64_t cg,
2086 	uint32_t cginoblk,
2087 	int gbflags)
2088 {
2089 	struct fs *fs;
2090 
2091 	fs = ITOFS(ip);
2092 	return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
2093 	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
2094 	    gbflags));
2095 }
2096 
2097 /*
2098  * Synchronous inode initialization is needed only when barrier writes do not
2099  * work as advertised, and will impose a heavy cost on file creation in a newly
2100  * created filesystem.
2101  */
2102 static int doasyncinodeinit = 1;
2103 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncinodeinit, CTLFLAG_RWTUN,
2104     &doasyncinodeinit, 0,
2105     "Perform inode block initialization using asynchronous writes");
2106 
2107 /*
2108  * Determine whether an inode can be allocated.
2109  *
2110  * Check to see if an inode is available, and if it is,
2111  * allocate it using the following policy:
2112  *   1) allocate the requested inode.
2113  *   2) allocate the next available inode after the requested
2114  *      inode in the specified cylinder group.
2115  */
2116 static ufs2_daddr_t
2117 ffs_nodealloccg(struct inode *ip,
2118 	uint64_t cg,
2119 	ufs2_daddr_t ipref,
2120 	int mode,
2121 	int unused)
2122 {
2123 	struct fs *fs;
2124 	struct cg *cgp;
2125 	struct buf *bp, *ibp;
2126 	struct ufsmount *ump;
2127 	uint8_t *inosused, *loc;
2128 	struct ufs2_dinode *dp2;
2129 	int error, start, len, i;
2130 	uint32_t old_initediblk;
2131 
2132 	ump = ITOUMP(ip);
2133 	fs = ump->um_fs;
2134 check_nifree:
2135 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
2136 		return (0);
2137 	UFS_UNLOCK(ump);
2138 	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0) {
2139 		ffs_checkcgintegrity(fs, cg, error);
2140 		UFS_LOCK(ump);
2141 		return (0);
2142 	}
2143 restart:
2144 	if (cgp->cg_cs.cs_nifree == 0) {
2145 		brelse(bp);
2146 		UFS_LOCK(ump);
2147 		return (0);
2148 	}
2149 	inosused = cg_inosused(cgp);
2150 	if (ipref) {
2151 		ipref %= fs->fs_ipg;
2152 		if (isclr(inosused, ipref))
2153 			goto gotit;
2154 	}
2155 	start = cgp->cg_irotor / NBBY;
2156 	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2157 	loc = memcchr(&inosused[start], 0xff, len);
2158 	if (loc == NULL) {
2159 		len = start + 1;
2160 		start = 0;
2161 		loc = memcchr(&inosused[start], 0xff, len);
2162 		if (loc == NULL) {
2163 			printf("cg = %ju, irotor = %ld, fs = %s\n",
2164 			    (intmax_t)cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2165 			panic("ffs_nodealloccg: map corrupted");
2166 			/* NOTREACHED */
2167 		}
2168 	}
2169 	ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2170 gotit:
2171 	/*
2172 	 * Check to see if we need to initialize more inodes.
2173 	 */
2174 	if (fs->fs_magic == FS_UFS2_MAGIC &&
2175 	    ipref + INOPB(fs) > cgp->cg_initediblk &&
2176 	    cgp->cg_initediblk < cgp->cg_niblk) {
2177 		old_initediblk = cgp->cg_initediblk;
2178 
2179 		/*
2180 		 * Free the cylinder group lock before writing the
2181 		 * initialized inode block.  Entering
2182 		 * babarrierwrite() with the cylinder group lock
2183 		 * held causes a lock order violation between the
2184 		 * lock and snaplk.
2185 		 *
2186 		 * Another thread can decide to initialize the same
2187 		 * inode block, but whichever thread first gets the
2188 		 * cylinder group lock after writing the newly
2189 		 * allocated inode block will update it and the other
2190 		 * will realize that it has lost and leave the
2191 		 * cylinder group unchanged.
2192 		 */
2193 		ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2194 		brelse(bp);
2195 		if (ibp == NULL) {
2196 			/*
2197 			 * The inode block buffer is already owned by
2198 			 * another thread, which must initialize it.
2199 			 * Wait on the buffer to allow the other thread
2200 			 * to finish its updates with the cg buffer
2201 			 * lock dropped, then retry.
2202 			 */
2203 			ibp = getinobuf(ip, cg, old_initediblk, 0);
2204 			brelse(ibp);
2205 			UFS_LOCK(ump);
2206 			goto check_nifree;
2207 		}
2208 		bzero(ibp->b_data, (int)fs->fs_bsize);
2209 		dp2 = (struct ufs2_dinode *)(ibp->b_data);
2210 		for (i = 0; i < INOPB(fs); i++) {
2211 			while (dp2->di_gen == 0)
2212 				dp2->di_gen = arc4random();
2213 			dp2++;
2214 		}
2215 
2216 		/*
2217 		 * Rather than adding a soft updates dependency to ensure
2218 		 * that the new inode block is written before it is claimed
2219 		 * by the cylinder group map, we just do a barrier write
2220 		 * here. The barrier write will ensure that the inode block
2221 		 * gets written before the updated cylinder group map can be
2222 		 * written. The barrier write should only slow down bulk
2223 		 * loading of newly created filesystems.
2224 		 */
2225 		if (doasyncinodeinit)
2226 			babarrierwrite(ibp);
2227 		else
2228 			bwrite(ibp);
2229 
2230 		/*
2231 		 * After the inode block is written, try to update the
2232 		 * cg initediblk pointer.  If another thread beat us
2233 		 * to it, then leave it unchanged as the other thread
2234 		 * has already set it correctly.
2235 		 */
2236 		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
2237 		UFS_LOCK(ump);
2238 		ACTIVECLEAR(fs, cg);
2239 		UFS_UNLOCK(ump);
2240 		if (error != 0)
2241 			return (error);
2242 		if (cgp->cg_initediblk == old_initediblk)
2243 			cgp->cg_initediblk += INOPB(fs);
2244 		goto restart;
2245 	}
2246 	cgp->cg_irotor = ipref;
2247 	UFS_LOCK(ump);
2248 	ACTIVECLEAR(fs, cg);
2249 	setbit(inosused, ipref);
2250 	cgp->cg_cs.cs_nifree--;
2251 	fs->fs_cstotal.cs_nifree--;
2252 	fs->fs_cs(fs, cg).cs_nifree--;
2253 	fs->fs_fmod = 1;
2254 	if ((mode & IFMT) == IFDIR) {
2255 		cgp->cg_cs.cs_ndir++;
2256 		fs->fs_cstotal.cs_ndir++;
2257 		fs->fs_cs(fs, cg).cs_ndir++;
2258 	}
2259 	UFS_UNLOCK(ump);
2260 	if (DOINGSOFTDEP(ITOV(ip)))
2261 		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2262 	bdwrite(bp);
2263 	return ((ino_t)(cg * fs->fs_ipg + ipref));
2264 }
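
/*
 * Illustrative sketch: the rotor-based scan of the inode-allocation bitmap
 * used above, where a set bit means "in use".  The scan starts at the byte
 * containing the rotor, wraps around once, and returns the index of the
 * first clear bit; the caller has already checked that the group has a
 * free inode.  The name find_free_inode is hypothetical and the block is
 * under "#if 0" so it is never compiled.
 */
#if 0
static int
find_free_inode(const uint8_t *inosused, int ipg, int rotor)
{
	int bit, byte, nbytes, start;

	nbytes = (ipg + 7) / 8;
	start = rotor / 8;
	for (byte = start; byte < nbytes; byte++)
		if (inosused[byte] != 0xff)
			goto found;
	for (byte = 0; byte < start; byte++)
		if (inosused[byte] != 0xff)
			goto found;
	return (-1);			/* map full or corrupted */
found:
	for (bit = 0; bit < 8; bit++)
		if ((inosused[byte] & (1 << bit)) == 0)
			break;
	return (byte * 8 + bit);
}
#endif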
2265 
2266 /*
2267  * Free a block or fragment.
2268  *
2269  * The specified block or fragment is placed back in the
2270  * free map. If a fragment is deallocated, a possible
2271  * block reassembly is checked.
2272  */
2273 static void
2274 ffs_blkfree_cg(struct ufsmount *ump,
2275 	struct fs *fs,
2276 	struct vnode *devvp,
2277 	ufs2_daddr_t bno,
2278 	long size,
2279 	ino_t inum,
2280 	struct workhead *dephd)
2281 {
2282 	struct mount *mp;
2283 	struct cg *cgp;
2284 	struct buf *bp;
2285 	daddr_t dbn;
2286 	ufs1_daddr_t fragno, cgbno;
2287 	int i, blk, frags, bbase, error;
2288 	uint64_t cg;
2289 	uint8_t *blksfree;
2290 	struct cdev *dev;
2291 
2292 	cg = dtog(fs, bno);
2293 	if (devvp->v_type == VREG) {
2294 		/* devvp is a snapshot */
2295 		MPASS(devvp->v_mount->mnt_data == ump);
2296 		dev = ump->um_devvp->v_rdev;
2297 	} else if (devvp->v_type == VCHR) {
2298 		/*
2299 		 * devvp is a normal disk device
2300 		 * XXXKIB: devvp is not locked there, v_rdev access depends on
2301 		 * busy mount, which prevents mntfs devvp from reclamation.
2302 		 */
2303 		dev = devvp->v_rdev;
2304 	} else
2305 		return;
2306 #ifdef INVARIANTS
2307 	if ((uint64_t)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2308 	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2309 		printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2310 		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2311 		    size, fs->fs_fsmnt);
2312 		panic("ffs_blkfree_cg: invalid size");
2313 	}
2314 #endif
2315 	if ((uint64_t)bno >= fs->fs_size) {
2316 		printf("bad block %jd, ino %ju\n", (intmax_t)bno,
2317 		    (intmax_t)inum);
2318 		ffs_fserr(fs, inum, "bad block");
2319 		return;
2320 	}
2321 	if ((error = ffs_getcg(fs, devvp, cg, GB_CVTENXIO, &bp, &cgp)) != 0) {
2322 		if (!MOUNTEDSOFTDEP(UFSTOVFS(ump)) || devvp->v_type != VCHR)
2323 			return;
2324 		/*
2325 		 * Would like to just downgrade to read-only. Until that
2326 		 * capability is available, just toss the cylinder group
2327 		 * update and mark the filesystem as needing to run fsck.
2328 		 */
2329 		fs->fs_flags |= FS_NEEDSFSCK;
2330 		if (devvp->v_type == VREG)
2331 			dbn = fragstoblks(fs, cgtod(fs, cg));
2332 		else
2333 			dbn = fsbtodb(fs, cgtod(fs, cg));
2334 		error = getblkx(devvp, dbn, dbn, fs->fs_cgsize, 0, 0, 0, &bp);
2335 		KASSERT(error == 0, ("getblkx failed"));
2336 		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2337 		    numfrags(fs, size), dephd, true);
2338 		bp->b_flags |= B_RELBUF | B_NOCACHE;
2339 		bp->b_flags &= ~B_CACHE;
2340 		bawrite(bp);
2341 		return;
2342 	}
2343 	cgbno = dtogd(fs, bno);
2344 	blksfree = cg_blksfree(cgp);
2345 	UFS_LOCK(ump);
2346 	if (size == fs->fs_bsize) {
2347 		fragno = fragstoblks(fs, cgbno);
2348 		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2349 			if (devvp->v_type == VREG) {
2350 				UFS_UNLOCK(ump);
2351 				/* devvp is a snapshot */
2352 				brelse(bp);
2353 				return;
2354 			}
2355 			printf("dev = %s, block = %jd, fs = %s\n",
2356 			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2357 			panic("ffs_blkfree_cg: freeing free block");
2358 		}
2359 		ffs_setblock(fs, blksfree, fragno);
2360 		ffs_clusteracct(fs, cgp, fragno, 1);
2361 		cgp->cg_cs.cs_nbfree++;
2362 		fs->fs_cstotal.cs_nbfree++;
2363 		fs->fs_cs(fs, cg).cs_nbfree++;
2364 	} else {
2365 		bbase = cgbno - fragnum(fs, cgbno);
2366 		/*
2367 		 * decrement the counts associated with the old frags
2368 		 */
2369 		blk = blkmap(fs, blksfree, bbase);
2370 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2371 		/*
2372 		 * deallocate the fragment
2373 		 */
2374 		frags = numfrags(fs, size);
2375 		for (i = 0; i < frags; i++) {
2376 			if (isset(blksfree, cgbno + i)) {
2377 				printf("dev = %s, block = %jd, fs = %s\n",
2378 				    devtoname(dev), (intmax_t)(bno + i),
2379 				    fs->fs_fsmnt);
2380 				panic("ffs_blkfree_cg: freeing free frag");
2381 			}
2382 			setbit(blksfree, cgbno + i);
2383 		}
2384 		cgp->cg_cs.cs_nffree += i;
2385 		fs->fs_cstotal.cs_nffree += i;
2386 		fs->fs_cs(fs, cg).cs_nffree += i;
2387 		/*
2388 		 * add back in counts associated with the new frags
2389 		 */
2390 		blk = blkmap(fs, blksfree, bbase);
2391 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2392 		/*
2393 		 * if a complete block has been reassembled, account for it
2394 		 */
2395 		fragno = fragstoblks(fs, bbase);
2396 		if (ffs_isblock(fs, blksfree, fragno)) {
2397 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
2398 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2399 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2400 			ffs_clusteracct(fs, cgp, fragno, 1);
2401 			cgp->cg_cs.cs_nbfree++;
2402 			fs->fs_cstotal.cs_nbfree++;
2403 			fs->fs_cs(fs, cg).cs_nbfree++;
2404 		}
2405 	}
2406 	fs->fs_fmod = 1;
2407 	ACTIVECLEAR(fs, cg);
2408 	UFS_UNLOCK(ump);
2409 	mp = UFSTOVFS(ump);
2410 	if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
2411 		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2412 		    numfrags(fs, size), dephd, false);
2413 	bdwrite(bp);
2414 }
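
/*
 * Illustrative sketch: a simplified model of the ffs_fragacct() bookkeeping
 * invoked above.  It walks one block's worth of fragment bits and adds
 * (cnt == 1) or subtracts (cnt == -1) every maximal run of free fragments
 * from the per-size summary; a run covering the whole block is skipped
 * because a fully free block is counted in cs_nbfree instead.  The name
 * frag_histogram is hypothetical and the block is under "#if 0".
 */
#if 0
static void
frag_histogram(const uint8_t *blksfree, long bbase, int fragsperblk,
    int32_t *frsum, int cnt)
{
	int i, run;

	run = 0;
	for (i = 0; i < fragsperblk; i++) {
		if (blksfree[(bbase + i) / 8] & (1 << ((bbase + i) % 8))) {
			run++;
			continue;
		}
		if (run > 0 && run < fragsperblk)
			frsum[run] += cnt;
		run = 0;
	}
	if (run > 0 && run < fragsperblk)
		frsum[run] += cnt;
}
#endif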
2415 
2416 /*
2417  * Structures and routines associated with trim management.
2418  *
2419  * The following requests are passed to trim_lookup to indicate
2420  * the actions that should be taken.
2421  */
2422 #define	NEW	1	/* if found, error else allocate and hash it */
2423 #define	OLD	2	/* if not found, error, else return it */
2424 #define	REPLACE	3	/* if not found, error else unhash and reallocate it */
2425 #define	DONE	4	/* if not found, error else unhash and return it */
2426 #define	SINGLE	5	/* don't look up, just allocate it and don't hash it */
2427 
2428 MALLOC_DEFINE(M_TRIM, "ufs_trim", "UFS trim structures");
2429 
2430 #define	TRIMLIST_HASH(ump, key) \
2431 	(&(ump)->um_trimhash[(key) & (ump)->um_trimlisthashsize])
2432 
2433 /*
2434  * These structures describe each of the block free requests aggregated
2435  * together to make up a trim request.
2436  */
2437 struct trim_blkreq {
2438 	TAILQ_ENTRY(trim_blkreq) blkreqlist;
2439 	ufs2_daddr_t bno;
2440 	long size;
2441 	struct workhead *pdephd;
2442 	struct workhead dephd;
2443 };
2444 
2445 /*
2446  * Description of a trim request.
2447  */
2448 struct ffs_blkfree_trim_params {
2449 	TAILQ_HEAD(, trim_blkreq) blklist;
2450 	LIST_ENTRY(ffs_blkfree_trim_params) hashlist;
2451 	struct task task;
2452 	struct ufsmount *ump;
2453 	struct vnode *devvp;
2454 	ino_t inum;
2455 	ufs2_daddr_t bno;
2456 	long size;
2457 	long key;
2458 };
2459 
2460 static void	ffs_blkfree_trim_completed(struct buf *);
2461 static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
2462 static struct	ffs_blkfree_trim_params *trim_lookup(struct ufsmount *,
2463 		    struct vnode *, ufs2_daddr_t, long, ino_t, uint64_t, int);
2464 static void	ffs_blkfree_sendtrim(struct ffs_blkfree_trim_params *);
2465 
2466 /*
2467  * Called on trim completion to start a task to free the associated block(s).
2468  */
2469 static void
2470 ffs_blkfree_trim_completed(struct buf *bp)
2471 {
2472 	struct ffs_blkfree_trim_params *tp;
2473 
2474 	tp = bp->b_fsprivate1;
2475 	free(bp, M_TRIM);
2476 	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2477 	taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
2478 }
2479 
2480 /*
2481  * Trim completion task that frees the associated block(s).
2482  */
2483 static void
2484 ffs_blkfree_trim_task(void *ctx, int pending)
2485 {
2486 	struct ffs_blkfree_trim_params *tp;
2487 	struct trim_blkreq *blkelm;
2488 	struct ufsmount *ump;
2489 
2490 	tp = ctx;
2491 	ump = tp->ump;
2492 	while ((blkelm = TAILQ_FIRST(&tp->blklist)) != NULL) {
2493 		ffs_blkfree_cg(ump, ump->um_fs, tp->devvp, blkelm->bno,
2494 		    blkelm->size, tp->inum, blkelm->pdephd);
2495 		TAILQ_REMOVE(&tp->blklist, blkelm, blkreqlist);
2496 		free(blkelm, M_TRIM);
2497 	}
2498 	vn_finished_secondary_write(UFSTOVFS(ump));
2499 	UFS_LOCK(ump);
2500 	ump->um_trim_inflight -= 1;
2501 	ump->um_trim_inflight_blks -= numfrags(ump->um_fs, tp->size);
2502 	UFS_UNLOCK(ump);
2503 	free(tp, M_TRIM);
2504 }
2505 
2506 /*
2507  * Lookup a trim request by inode number.
2508  * Allocate if requested (NEW, REPLACE, SINGLE).
2509  */
2510 static struct ffs_blkfree_trim_params *
2511 trim_lookup(struct ufsmount *ump,
2512 	struct vnode *devvp,
2513 	ufs2_daddr_t bno,
2514 	long size,
2515 	ino_t inum,
2516 	uint64_t key,
2517 	int alloctype)
2518 {
2519 	struct trimlist_hashhead *tphashhead;
2520 	struct ffs_blkfree_trim_params *tp, *ntp;
2521 
2522 	ntp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TRIM, M_WAITOK);
2523 	if (alloctype != SINGLE) {
2524 		KASSERT(key >= FIRST_VALID_KEY, ("trim_lookup: invalid key"));
2525 		UFS_LOCK(ump);
2526 		tphashhead = TRIMLIST_HASH(ump, key);
2527 		LIST_FOREACH(tp, tphashhead, hashlist)
2528 			if (key == tp->key)
2529 				break;
2530 	}
2531 	switch (alloctype) {
2532 	case NEW:
2533 		KASSERT(tp == NULL, ("trim_lookup: found trim"));
2534 		break;
2535 	case OLD:
2536 		KASSERT(tp != NULL,
2537 		    ("trim_lookup: missing call to ffs_blkrelease_start()"));
2538 		UFS_UNLOCK(ump);
2539 		free(ntp, M_TRIM);
2540 		return (tp);
2541 	case REPLACE:
2542 		KASSERT(tp != NULL, ("trim_lookup: missing REPLACE trim"));
2543 		LIST_REMOVE(tp, hashlist);
2544 		/* tp will be freed by caller */
2545 		break;
2546 	case DONE:
2547 		KASSERT(tp != NULL, ("trim_lookup: missing DONE trim"));
2548 		LIST_REMOVE(tp, hashlist);
2549 		UFS_UNLOCK(ump);
2550 		free(ntp, M_TRIM);
2551 		return (tp);
2552 	}
2553 	TAILQ_INIT(&ntp->blklist);
2554 	ntp->ump = ump;
2555 	ntp->devvp = devvp;
2556 	ntp->bno = bno;
2557 	ntp->size = size;
2558 	ntp->inum = inum;
2559 	ntp->key = key;
2560 	if (alloctype != SINGLE) {
2561 		LIST_INSERT_HEAD(tphashhead, ntp, hashlist);
2562 		UFS_UNLOCK(ump);
2563 	}
2564 	return (ntp);
2565 }
2566 
2567 /*
2568  * Dispatch a trim request.
2569  */
2570 static void
2571 ffs_blkfree_sendtrim(struct ffs_blkfree_trim_params *tp)
2572 {
2573 	struct ufsmount *ump;
2574 	struct mount *mp;
2575 	struct buf *bp;
2576 
2577 	/*
2578 	 * Postpone the set of the free bit in the cg bitmap until the
2579 	 * BIO_DELETE is completed.  Otherwise, due to disk queue
2580 	 * reordering, TRIM might be issued after we reuse the block
2581 	 * and write some new data into it.
2582 	 */
2583 	ump = tp->ump;
2584 	bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
2585 	bp->b_iocmd = BIO_DELETE;
2586 	bp->b_iooffset = dbtob(fsbtodb(ump->um_fs, tp->bno));
2587 	bp->b_iodone = ffs_blkfree_trim_completed;
2588 	bp->b_bcount = tp->size;
2589 	bp->b_fsprivate1 = tp;
2590 	UFS_LOCK(ump);
2591 	ump->um_trim_total += 1;
2592 	ump->um_trim_inflight += 1;
2593 	ump->um_trim_inflight_blks += numfrags(ump->um_fs, tp->size);
2594 	ump->um_trim_total_blks += numfrags(ump->um_fs, tp->size);
2595 	UFS_UNLOCK(ump);
2596 
2597 	mp = UFSTOVFS(ump);
2598 	vn_start_secondary_write(NULL, &mp, 0);
2599 	g_vfs_strategy(ump->um_bo, bp);
2600 }
2601 
2602 /*
2603  * Allocate a new key to use to identify a range of blocks.
2604  */
2605 uint64_t
2606 ffs_blkrelease_start(struct ufsmount *ump,
2607 	struct vnode *devvp,
2608 	ino_t inum)
2609 {
2610 	static u_long masterkey;
2611 	uint64_t key;
2612 
2613 	if (((ump->um_flags & UM_CANDELETE) == 0) || dotrimcons == 0)
2614 		return (SINGLETON_KEY);
2615 	do {
2616 		key = atomic_fetchadd_long(&masterkey, 1);
2617 	} while (key < FIRST_VALID_KEY);
2618 	(void) trim_lookup(ump, devvp, 0, 0, inum, key, NEW);
2619 	return (key);
2620 }
2621 
2622 /*
2623  * Deallocate a key that has been used to identify a range of blocks.
2624  */
2625 void
2626 ffs_blkrelease_finish(struct ufsmount *ump, uint64_t key)
2627 {
2628 	struct ffs_blkfree_trim_params *tp;
2629 
2630 	if (((ump->um_flags & UM_CANDELETE) == 0) || dotrimcons == 0)
2631 		return;
2632 	/*
2633 	 * If the vfs.ffs.dotrimcons sysctl option is enabled while
2634 	 * a file deletion is active, specifically after a call
2635 	 * to ffs_blkrelease_start() but before the call to
2636 	 * ffs_blkrelease_finish(), ffs_blkrelease_start() will
2637 	 * have handed out SINGLETON_KEY rather than starting a
2638 	 * collection sequence. Thus if we get a SINGLETON_KEY
2639 	 * passed to ffs_blkrelease_finish(), we just return rather
2640 	 * than trying to finish the nonexistent sequence.
2641 	 */
2642 	if (key == SINGLETON_KEY) {
2643 #ifdef INVARIANTS
2644 		printf("%s: vfs.ffs.dotrimcons enabled on active filesystem\n",
2645 		    ump->um_mountp->mnt_stat.f_mntonname);
2646 #endif
2647 		return;
2648 	}
2649 	/*
2650 	 * We are done with sending blocks using this key. Look up the key
2651 	 * using the DONE alloctype (in tp) to request that it be unhashed
2652 	 * as we will not be adding to it. If the key has never been used,
2653 	 * tp->size will be zero, so we can just free tp. Otherwise the call
2654 	 * to ffs_blkfree_sendtrim(tp) causes the block range described by
2655 	 * tp to be issued (and then tp to be freed).
2656 	 */
2657 	tp = trim_lookup(ump, NULL, 0, 0, 0, key, DONE);
2658 	if (tp->size == 0)
2659 		free(tp, M_TRIM);
2660 	else
2661 		ffs_blkfree_sendtrim(tp);
2662 }
2663 
2664 /*
2665  * Setup to free a block or fragment.
2666  *
2667  * Check for snapshots that might want to claim the block.
2668  * If trims are requested, prepare a trim request. Attempt to
2669  * aggregate consecutive blocks into a single trim request.
2670  */
2671 void
2672 ffs_blkfree(struct ufsmount *ump,
2673 	struct fs *fs,
2674 	struct vnode *devvp,
2675 	ufs2_daddr_t bno,
2676 	long size,
2677 	ino_t inum,
2678 	__enum_uint8(vtype) vtype,
2679 	struct workhead *dephd,
2680 	uint64_t key)
2681 {
2682 	struct ffs_blkfree_trim_params *tp, *ntp;
2683 	struct trim_blkreq *blkelm;
2684 
2685 	/*
2686 	 * Check to see if a snapshot wants to claim the block.
2687 	 * Check that devvp is a normal disk device, not a snapshot,
2688 	 * it has a snapshot(s) associated with it, and one of the
2689 	 * snapshots wants to claim the block.
2690 	 */
2691 	if (devvp->v_type == VCHR &&
2692 	    (devvp->v_vflag & VV_COPYONWRITE) &&
2693 	    ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2694 		return;
2695 	}
2696 	/*
2697 	 * Nothing to delay if TRIM is not required for this block or TRIM
2698 	 * is disabled or the operation is performed on a snapshot.
2699 	 */
2700 	if (key == NOTRIM_KEY || ((ump->um_flags & UM_CANDELETE) == 0) ||
2701 	    devvp->v_type == VREG) {
2702 		ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2703 		return;
2704 	}
2705 	blkelm = malloc(sizeof(struct trim_blkreq), M_TRIM, M_WAITOK);
2706 	blkelm->bno = bno;
2707 	blkelm->size = size;
2708 	if (dephd == NULL) {
2709 		blkelm->pdephd = NULL;
2710 	} else {
2711 		LIST_INIT(&blkelm->dephd);
2712 		LIST_SWAP(dephd, &blkelm->dephd, worklist, wk_list);
2713 		blkelm->pdephd = &blkelm->dephd;
2714 	}
2715 	if (key == SINGLETON_KEY) {
2716 		/*
2717 		 * Just a single non-contiguous piece. Use the SINGLE
2718 		 * alloctype to return a trim request that will not be
2719 		 * hashed for future lookup.
2720 		 */
2721 		tp = trim_lookup(ump, devvp, bno, size, inum, key, SINGLE);
2722 		TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
2723 		ffs_blkfree_sendtrim(tp);
2724 		return;
2725 	}
2726 	/*
2727 	 * The callers of this function are not tracking whether or not
2728 	 * the blocks are contiguous. They are just saying that they
2729 	 * are freeing a set of blocks. It is this code that determines
2730 	 * the pieces of that range that are actually contiguous.
2731 	 *
2732 	 * Calling ffs_blkrelease_start() will have created an entry
2733 	 * that we will use.
2734 	 */
2735 	tp = trim_lookup(ump, devvp, bno, size, inum, key, OLD);
2736 	if (tp->size == 0) {
2737 		/*
2738 		 * First block of a potential range, set block and size
2739 		 * for the trim block.
2740 		 */
2741 		tp->bno = bno;
2742 		tp->size = size;
2743 		TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
2744 		return;
2745 	}
2746 	/*
2747 	 * If this block is a continuation of the range (either
2748 	 * follows at the end or precedes at the front) then we
2749 	 * add it to the front or back of the list and return.
2750 	 *
2751 	 * If it is not a continuation of the trim that we were
2752 	 * building, using the REPLACE alloctype, we request that
2753 	 * the old trim request (still in tp) be unhashed and a
2754 	 * new range started (in ntp). The ffs_blkfree_sendtrim(tp)
2755 	 * call causes the block range described by tp to be issued
2756 	 * (and then tp to be freed).
2757 	 */
2758 	if (bno + numfrags(fs, size) == tp->bno) {
2759 		TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
2760 		tp->bno = bno;
2761 		tp->size += size;
2762 		return;
2763 	} else if (bno == tp->bno + numfrags(fs, tp->size)) {
2764 		TAILQ_INSERT_TAIL(&tp->blklist, blkelm, blkreqlist);
2765 		tp->size += size;
2766 		return;
2767 	}
2768 	ntp = trim_lookup(ump, devvp, bno, size, inum, key, REPLACE);
2769 	TAILQ_INSERT_HEAD(&ntp->blklist, blkelm, blkreqlist);
2770 	ffs_blkfree_sendtrim(tp);
2771 }
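
/*
 * Illustrative sketch: the range coalescing that ffs_blkfree() applies to
 * trim requests.  A pending range grows when a newly freed piece abuts it
 * at either end; anything non-contiguous means the pending range should be
 * issued and a new one started.  The struct and function names below
 * (trim_range, trim_coalesce) are hypothetical and the block is under
 * "#if 0" so it is never compiled.
 */
#if 0
struct trim_range {
	int64_t	bno;	/* first fragment of the pending range */
	int64_t	frags;	/* length of the pending range in fragments */
};

static int
trim_coalesce(struct trim_range *tr, int64_t bno, int64_t frags)
{
	if (tr->frags == 0) {			/* first piece of a new range */
		tr->bno = bno;
		tr->frags = frags;
		return (1);
	}
	if (bno + frags == tr->bno) {		/* abuts the front */
		tr->bno = bno;
		tr->frags += frags;
		return (1);
	}
	if (bno == tr->bno + tr->frags) {	/* abuts the back */
		tr->frags += frags;
		return (1);
	}
	return (0);	/* not contiguous: issue the pending range first */
}
#endif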
2772 
2773 #ifdef INVARIANTS
2774 /*
2775  * Verify allocation of a block or fragment.
2776  * Return 1 if block or fragment is free.
2777  */
2778 static int
2779 ffs_checkfreeblk(struct inode *ip,
2780 	ufs2_daddr_t bno,
2781 	long size)
2782 {
2783 	struct fs *fs;
2784 	struct cg *cgp;
2785 	struct buf *bp;
2786 	ufs1_daddr_t cgbno;
2787 	int i, frags, blkalloced;
2788 	uint8_t *blksfree;
2789 
2790 	fs = ITOFS(ip);
2791 	if ((uint64_t)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2792 		printf("bsize = %ld, size = %ld, fs = %s\n",
2793 		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
2794 		panic("ffs_checkfreeblk: bad size");
2795 	}
2796 	if ((uint64_t)bno >= fs->fs_size)
2797 		panic("ffs_checkfreeblk: too big block %jd", (intmax_t)bno);
2798 	if (ffs_getcg(fs, ITODEVVP(ip), dtog(fs, bno), 0, &bp, &cgp) != 0)
2799 		return (0);
2800 	blksfree = cg_blksfree(cgp);
2801 	cgbno = dtogd(fs, bno);
2802 	if (size == fs->fs_bsize) {
2803 		blkalloced = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2804 	} else {
2805 		frags = numfrags(fs, size);
2806 		for (blkalloced = 0, i = 0; i < frags; i++)
2807 			if (isset(blksfree, cgbno + i))
2808 				blkalloced++;
2809 		if (blkalloced != 0 && blkalloced != frags)
2810 			panic("ffs_checkfreeblk: partially free fragment");
2811 	}
2812 	brelse(bp);
2813 	return (blkalloced == 0);
2814 }
2815 #endif /* INVARIANTS */
2816 
2817 /*
2818  * Free an inode.
2819  */
2820 int
2821 ffs_vfree(struct vnode *pvp,
2822 	ino_t ino,
2823 	int mode)
2824 {
2825 	struct ufsmount *ump;
2826 
2827 	if (DOINGSOFTDEP(pvp)) {
2828 		softdep_freefile(pvp, ino, mode);
2829 		return (0);
2830 	}
2831 	ump = VFSTOUFS(pvp->v_mount);
2832 	return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
2833 }
2834 
2835 /*
2836  * Do the actual free operation.
2837  * The specified inode is placed back in the free map.
2838  */
2839 int
2840 ffs_freefile(struct ufsmount *ump,
2841 	struct fs *fs,
2842 	struct vnode *devvp,
2843 	ino_t ino,
2844 	int mode,
2845 	struct workhead *wkhd)
2846 {
2847 	struct cg *cgp;
2848 	struct buf *bp;
2849 	daddr_t dbn;
2850 	int error;
2851 	uint64_t cg;
2852 	uint8_t *inosused;
2853 	struct cdev *dev;
2854 	ino_t cgino;
2855 
2856 	cg = ino_to_cg(fs, ino);
2857 	if (devvp->v_type == VREG) {
2858 		/* devvp is a snapshot */
2859 		MPASS(devvp->v_mount->mnt_data == ump);
2860 		dev = ump->um_devvp->v_rdev;
2861 	} else if (devvp->v_type == VCHR) {
2862 		/* devvp is a normal disk device */
2863 		dev = devvp->v_rdev;
2864 	} else {
2865 		bp = NULL;
2866 		return (0);
2867 	}
2868 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2869 		panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2870 		    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2871 	if ((error = ffs_getcg(fs, devvp, cg, GB_CVTENXIO, &bp, &cgp)) != 0) {
2872 		if (!MOUNTEDSOFTDEP(UFSTOVFS(ump)) || devvp->v_type != VCHR)
2873 			return (error);
2874 		/*
2875 		 * Would like to just downgrade to read-only. Until that
2876 		 * capability is available, just toss the cylinder group
2877 		 * update and mark the filesystem as needing to run fsck.
2878 		 */
2879 		fs->fs_flags |= FS_NEEDSFSCK;
2880 		if (devvp->v_type == VREG)
2881 			dbn = fragstoblks(fs, cgtod(fs, cg));
2882 		else
2883 			dbn = fsbtodb(fs, cgtod(fs, cg));
2884 		error = getblkx(devvp, dbn, dbn, fs->fs_cgsize, 0, 0, 0, &bp);
2885 		KASSERT(error == 0, ("getblkx failed"));
2886 		softdep_setup_inofree(UFSTOVFS(ump), bp, ino, wkhd, true);
2887 		bp->b_flags |= B_RELBUF | B_NOCACHE;
2888 		bp->b_flags &= ~B_CACHE;
2889 		bawrite(bp);
2890 		return (error);
2891 	}
2892 	inosused = cg_inosused(cgp);
2893 	cgino = ino % fs->fs_ipg;
2894 	if (isclr(inosused, cgino)) {
2895 		printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2896 		    (uintmax_t)ino, fs->fs_fsmnt);
2897 		if (fs->fs_ronly == 0)
2898 			panic("ffs_freefile: freeing free inode");
2899 	}
2900 	clrbit(inosused, cgino);
2901 	if (cgino < cgp->cg_irotor)
2902 		cgp->cg_irotor = cgino;
2903 	cgp->cg_cs.cs_nifree++;
2904 	UFS_LOCK(ump);
2905 	fs->fs_cstotal.cs_nifree++;
2906 	fs->fs_cs(fs, cg).cs_nifree++;
2907 	if ((mode & IFMT) == IFDIR) {
2908 		cgp->cg_cs.cs_ndir--;
2909 		fs->fs_cstotal.cs_ndir--;
2910 		fs->fs_cs(fs, cg).cs_ndir--;
2911 	}
2912 	fs->fs_fmod = 1;
2913 	ACTIVECLEAR(fs, cg);
2914 	UFS_UNLOCK(ump);
2915 	if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
2916 		softdep_setup_inofree(UFSTOVFS(ump), bp, ino, wkhd, false);
2917 	bdwrite(bp);
2918 	return (0);
2919 }
2920 
2921 /*
2922  * Check to see if a file is free.
2923  * Used to check for allocated files in snapshots.
2924  * Return 1 if file is free.
2925  */
2926 int
2927 ffs_checkfreefile(struct fs *fs,
2928 	struct vnode *devvp,
2929 	ino_t ino)
2930 {
2931 	struct cg *cgp;
2932 	struct buf *bp;
2933 	int ret, error;
2934 	uint64_t cg;
2935 	uint8_t *inosused;
2936 
2937 	cg = ino_to_cg(fs, ino);
2938 	if ((devvp->v_type != VREG) && (devvp->v_type != VCHR))
2939 		return (1);
2940 	if (ino >= fs->fs_ipg * fs->fs_ncg)
2941 		return (1);
2942 	if ((error = ffs_getcg(fs, devvp, cg, 0, &bp, &cgp)) != 0)
2943 		return (1);
2944 	inosused = cg_inosused(cgp);
2945 	ino %= fs->fs_ipg;
2946 	ret = isclr(inosused, ino);
2947 	brelse(bp);
2948 	return (ret);
2949 }
2950 
2951 /*
2952  * Find a block of the specified size in the specified cylinder group.
2953  *
2954  * It is a panic if a request is made to find a block if none are
2955  * available.
2956  */
2957 static ufs1_daddr_t
2958 ffs_mapsearch(struct fs *fs,
2959 	struct cg *cgp,
2960 	ufs2_daddr_t bpref,
2961 	int allocsiz)
2962 {
2963 	ufs1_daddr_t bno;
2964 	int start, len, loc, i;
2965 	int blk, field, subfield, pos;
2966 	uint8_t *blksfree;
2967 
2968 	/*
2969 	 * find the fragment by searching through the free block
2970 	 * map for an appropriate bit pattern
2971 	 */
2972 	if (bpref)
2973 		start = dtogd(fs, bpref) / NBBY;
2974 	else
2975 		start = cgp->cg_frotor / NBBY;
2976 	blksfree = cg_blksfree(cgp);
2977 	len = howmany(fs->fs_fpg, NBBY) - start;
2978 	loc = scanc((uint64_t)len, (uint8_t *)&blksfree[start],
2979 		fragtbl[fs->fs_frag],
2980 		(uint8_t)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2981 	if (loc == 0) {
2982 		len = start + 1;
2983 		start = 0;
2984 		loc = scanc((uint64_t)len, (uint8_t *)&blksfree[0],
2985 			fragtbl[fs->fs_frag],
2986 			(uint8_t)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2987 		if (loc == 0) {
2988 			printf("start = %d, len = %d, fs = %s\n",
2989 			    start, len, fs->fs_fsmnt);
2990 			panic("ffs_alloccg: map corrupted");
2991 			/* NOTREACHED */
2992 		}
2993 	}
2994 	bno = (start + len - loc) * NBBY;
2995 	cgp->cg_frotor = bno;
2996 	/*
2997 	 * found the byte in the map
2998 	 * sift through the bits to find the selected frag
2999 	 */
3000 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
3001 		blk = blkmap(fs, blksfree, bno);
3002 		blk <<= 1;
3003 		field = around[allocsiz];
3004 		subfield = inside[allocsiz];
3005 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
3006 			if ((blk & field) == subfield)
3007 				return (bno + pos);
3008 			field <<= 1;
3009 			subfield <<= 1;
3010 		}
3011 	}
3012 	printf("bno = %ju, fs = %s\n", (intmax_t)bno, fs->fs_fsmnt);
3013 	panic("ffs_alloccg: block not in map");
3014 	return (-1);
3015 }
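
/*
 * Illustrative sketch: a simplified version of the in-block search above.
 * It slides a window of "allocsiz" bits across one block's fragment mask
 * (1 == free) and returns the first offset where every fragment in the
 * window is free.  The real code instead matches the map against the
 * precomputed around[]/inside[] bit patterns; the name find_frag_offset is
 * hypothetical and the block is under "#if 0" so it is never compiled.
 */
#if 0
static int
find_frag_offset(uint32_t freemask, int fragsperblk, int allocsiz)
{
	uint32_t want;
	int pos;

	want = (1U << allocsiz) - 1;
	for (pos = 0; pos <= fragsperblk - allocsiz; pos++)
		if (((freemask >> pos) & want) == want)
			return (pos);
	return (-1);	/* no suitable run in this block */
}
#endif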
3016 
3017 /*
3018  * Fetch and verify a cylinder group.
3019  */
3020 int
3021 ffs_getcg(struct fs *fs,
3022 	struct vnode *devvp,
3023 	uint64_t cg,
3024 	int flags,
3025 	struct buf **bpp,
3026 	struct cg **cgpp)
3027 {
3028 	struct buf *bp;
3029 	struct cg *cgp;
3030 	struct mount *mp;
3031 	const struct statfs *sfs;
3032 	daddr_t blkno;
3033 	int error;
3034 
3035 	*bpp = NULL;
3036 	*cgpp = NULL;
3037 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
3038 		flags |= GB_CKHASH;
3039 	if (devvp->v_type == VCHR) {
3040 		blkno = fsbtodb(fs, cgtod(fs, cg));
3041 		mp = devvp->v_rdev->si_mountpt;
3042 	} else {
3043 		blkno = fragstoblks(fs, cgtod(fs, cg));
3044 		mp = devvp->v_mount;
3045 	}
3046 	error = breadn_flags(devvp, blkno, blkno, (int)fs->fs_cgsize, NULL,
3047 	    NULL, 0, NOCRED, flags, ffs_ckhash_cg, &bp);
3048 	if (error != 0)
3049 		return (error);
3050 	cgp = (struct cg *)bp->b_data;
3051 	if ((fs->fs_metackhash & CK_CYLGRP) != 0 &&
3052 	    (bp->b_flags & B_CKHASH) != 0 &&
3053 	    cgp->cg_ckhash != bp->b_ckhash) {
3054 		if (ppsratecheck(&VFSTOUFS(mp)->um_last_integritymsg,
3055 		    &VFSTOUFS(mp)->um_secs_integritymsg, 1)) {
3056 			sfs = &mp->mnt_stat;
3057 			printf("UFS %s%s (%s) cylinder checkhash failed: "
3058 			    "cg %ju, cgp: 0x%x != bp: 0x%jx\n",
3059 			    devvp->v_type == VCHR ? "" : "snapshot of ",
3060 			    sfs->f_mntfromname, sfs->f_mntonname, (intmax_t)cg,
3061 			    cgp->cg_ckhash, (uintmax_t)bp->b_ckhash);
3062 		}
3063 		bp->b_flags &= ~B_CKHASH;
3064 		bp->b_flags |= B_INVAL | B_NOCACHE;
3065 		brelse(bp);
3066 		return (EINTEGRITY);
3067 	}
3068 	if (!cg_chkmagic(cgp) || cgp->cg_cgx != cg) {
3069 		if (ppsratecheck(&VFSTOUFS(mp)->um_last_integritymsg,
3070 		    &VFSTOUFS(mp)->um_secs_integritymsg, 1)) {
3071 			sfs = &mp->mnt_stat;
3072 			printf("UFS %s%s (%s)",
3073 			    devvp->v_type == VCHR ? "" : "snapshot of ",
3074 			    sfs->f_mntfromname, sfs->f_mntonname);
3075 			if (!cg_chkmagic(cgp))
3076 				printf(" cg %ju: bad magic number 0x%x should "
3077 				    "be 0x%x\n", (intmax_t)cg, cgp->cg_magic,
3078 				    CG_MAGIC);
3079 			else
3080 				printf(": wrong cylinder group cg %ju != "
3081 				    "cgx %u\n", (intmax_t)cg, cgp->cg_cgx);
3082 		}
3083 		bp->b_flags &= ~B_CKHASH;
3084 		bp->b_flags |= B_INVAL | B_NOCACHE;
3085 		brelse(bp);
3086 		return (EINTEGRITY);
3087 	}
3088 	bp->b_flags &= ~B_CKHASH;
3089 	bp->b_xflags |= BX_BKGRDWRITE;
3090 	/*
3091 	 * If we are using check hashes on the cylinder group then we want
3092 	 * to limit changing the cylinder group time to when we are actually
3093 	 * going to write it to disk so that its check hash remains correct
3094 	 * in memory. If the CK_CYLGRP flag is set the time is updated in
3095 	 * ffs_bufwrite() as the buffer is queued for writing. Otherwise we
3096 	 * update the time here as we have done historically.
3097 	 */
3098 	if ((fs->fs_metackhash & CK_CYLGRP) != 0)
3099 		bp->b_xflags |= BX_CYLGRP;
3100 	else
3101 		cgp->cg_old_time = cgp->cg_time = time_second;
3102 	*bpp = bp;
3103 	*cgpp = cgp;
3104 	return (0);
3105 }
3106 
3107 static void
3108 ffs_ckhash_cg(struct buf *bp)
3109 {
3110 	uint32_t ckhash;
3111 	struct cg *cgp;
3112 
3113 	cgp = (struct cg *)bp->b_data;
3114 	ckhash = cgp->cg_ckhash;
3115 	cgp->cg_ckhash = 0;
3116 	bp->b_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
3117 	cgp->cg_ckhash = ckhash;
3118 }
3119 
3120 /*
3121  * Called when a cylinder group read has failed. If an integrity check
3122  * is the cause of failure then the cylinder group will not be usable
3123  * until the filesystem has been unmounted and fsck has been run to
3124  * repair it. To avoid future attempts to allocate resources from the
3125  * cylinder group, its available resources are set to zero in the
3126  * superblock summary information. Since it will appear to have no
3127  * resources available, no further calls will be made to allocate
3128  * resources from it. When resources are freed to the cylinder group
3129  * the resource free routines will find the cylinder group unusable so
3130  * the resource will simply be discarded and thus will not show up in
3131  * the superblock summary information until they are recovered by fsck.
3132  */
3133 static void
3134 ffs_checkcgintegrity(struct fs *fs,
3135 	uint64_t cg,
3136 	int error)
3137 {
3138 
3139 	if (error != EINTEGRITY)
3140 		return;
3141 	fs->fs_cstotal.cs_nffree -= fs->fs_cs(fs, cg).cs_nffree;
3142 	fs->fs_cs(fs, cg).cs_nffree = 0;
3143 	fs->fs_cstotal.cs_nbfree -= fs->fs_cs(fs, cg).cs_nbfree;
3144 	fs->fs_cs(fs, cg).cs_nbfree = 0;
3145 	fs->fs_cstotal.cs_nifree -= fs->fs_cs(fs, cg).cs_nifree;
3146 	fs->fs_cs(fs, cg).cs_nifree = 0;
3147 	fs->fs_maxcluster[cg] = 0;
3148 	fs->fs_flags |= FS_NEEDSFSCK;
3149 	fs->fs_fmod = 1;
3150 }
3151 
3152 /*
3153  * Fserr prints the name of a filesystem with an error diagnostic.
3154  *
3155  * The form of the error message is:
3156  *	fs: error message
3157  */
3158 void
3159 ffs_fserr(struct fs *fs,
3160 	ino_t inum,
3161 	char *cp)
3162 {
3163 	struct thread *td = curthread;	/* XXX */
3164 	struct proc *p = td->td_proc;
3165 
3166 	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
3167 	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
3168 	    fs->fs_fsmnt, cp);
3169 }
3170 
3171 /*
3172  * This function provides the capability for the fsck program to
3173  * update an active filesystem. Sixteen operations are provided:
3174  *
3175  * adjrefcnt(inode, amt) - adjusts the reference count on the
3176  *	specified inode by the specified amount. Under normal
3177  *	operation the count should always go down. Decrementing
3178  *	the count to zero will cause the inode to be freed.
3179  * adjblkcnt(inode, amt) - adjust the number of blocks used by the
3180  *	inode by the specified amount.
3181  * adjdepth(inode, amt) - adjust the depth of the specified directory
3182  *	inode by the specified amount.
3183  * setsize(inode, size) - set the size of the inode to the
3184  *	specified size.
3185  * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
3186  *	adjust the superblock summary.
3187  * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
3188  *	are marked as free. Inodes should never have to be marked
3189  *	as in use.
3190  * freefiles(inode, count) - file inodes [inode..inode + count - 1]
3191  *	are marked as free. Inodes should never have to be marked
3192  *	as in use.
3193  * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
3194  *	are marked as free. Blocks should never have to be marked
3195  *	as in use.
3196  * setflags(flags, set/clear) - the fs_flags field has the specified
3197  *	flags set (second parameter +1) or cleared (second parameter -1).
3198  * setcwd(dirinode) - set the current directory to dirinode in the
3199  *	filesystem associated with the snapshot.
3200  * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
3201  *	in the current directory is oldvalue then change it to newvalue.
3202  * unlink(nameptr, oldvalue) - Verify that the inode number associated
3203  *	with nameptr in the current directory is oldvalue then unlink it.
3204  */
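/*
 * Illustrative sketch only (not part of this file): a hedged example of
 * how fsck_ffs might submit one of the operations above.  The fsck_cmd
 * fields and the vfs.ffs.adjrefcnt name match the handler below; the
 * descriptor, inode number, use of sysctlbyname(3), and error handling
 * are assumptions made for the example.  The handle must be a descriptor
 * opened on a file or directory within the target filesystem, since the
 * handler resolves it with CAP_FSCK rights.
 *
 *	struct fsck_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.version = FFS_CMD_VERSION;
 *	cmd.handle = fd;	(descriptor within the filesystem)
 *	cmd.value = inum;	(inode whose link count is being repaired)
 *	cmd.size = -1;		(decrement the reference count by one)
 *	if (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL,
 *	    &cmd, sizeof(cmd)) == -1)
 *		warn("adjrefcnt");
 */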
3205 
3206 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
3207 
3208 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt,
3209     CTLFLAG_WR | CTLTYPE_STRUCT | CTLFLAG_NEEDGIANT,
3210     0, 0, sysctl_ffs_fsck, "S,fsck",
3211     "Adjust Inode Reference Count");
3212 
3213 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt,
3214     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3215     "Adjust Inode Used Blocks Count");
3216 
3217 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_DEPTH, adjdepth,
3218     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3219     "Adjust Directory Inode Depth");
3220 
3221 static SYSCTL_NODE(_vfs_ffs, FFS_SET_SIZE, setsize,
3222     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3223     "Set the inode size");
3224 
3225 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir,
3226     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3227     "Adjust number of directories");
3228 
3229 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree,
3230     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3231     "Adjust number of free blocks");
3232 
3233 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree,
3234     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3235     "Adjust number of free inodes");
3236 
3237 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree,
3238     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3239     "Adjust number of free frags");
3240 
3241 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters,
3242     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3243     "Adjust number of free clusters");
3244 
3245 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs,
3246     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3247     "Free Range of Directory Inodes");
3248 
3249 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles,
3250     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3251     "Free Range of File Inodes");
3252 
3253 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks,
3254     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3255     "Free Range of Blocks");
3256 
3257 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags,
3258     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3259     "Change Filesystem Flags");
3260 
3261 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd,
3262     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3263     "Set Current Working Directory");
3264 
3265 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot,
3266     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3267     "Change Value of .. Entry");
3268 
3269 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink,
3270     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3271     "Unlink a Duplicate Name");
3272 
3273 #ifdef DIAGNOSTIC
3274 static int fsckcmds = 0;
3275 SYSCTL_INT(_debug, OID_AUTO, ffs_fsckcmds, CTLFLAG_RW, &fsckcmds, 0,
3276 	"print out fsck_ffs-based filesystem update commands");
3277 #endif /* DIAGNOSTIC */
3278 
3279 static int
3280 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
3281 {
3282 	struct thread *td = curthread;
3283 	struct fsck_cmd cmd;
3284 	struct ufsmount *ump;
3285 	struct vnode *vp, *dvp, *fdvp;
3286 	struct inode *ip, *dp;
3287 	struct mount *mp;
3288 	struct fs *fs;
3289 	struct pwd *pwd;
3290 	ufs2_daddr_t blkno;
3291 	long blkcnt, blksize;
3292 	uint64_t key;
3293 	struct file *fp;
3294 	cap_rights_t rights;
3295 	int filetype, error;
3296 
3297 	if (req->newptr == NULL || req->newlen > sizeof(cmd))
3298 		return (EBADRPC);
3299 	if ((error = SYSCTL_IN(req, &cmd, sizeof(cmd))) != 0)
3300 		return (error);
3301 	if (cmd.version != FFS_CMD_VERSION)
3302 		return (ERPCMISMATCH);
3303 	if ((error = getvnode(td, cmd.handle,
3304 	    cap_rights_init_one(&rights, CAP_FSCK), &fp)) != 0)
3305 		return (error);
3306 	vp = fp->f_vnode;
3307 	if (vp->v_type != VREG && vp->v_type != VDIR) {
3308 		fdrop(fp, td);
3309 		return (EINVAL);
3310 	}
3311 	vn_start_write(vp, &mp, V_WAIT);
3312 	if (mp == NULL ||
3313 	    strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
3314 		vn_finished_write(mp);
3315 		fdrop(fp, td);
3316 		return (EINVAL);
3317 	}
3318 	ump = VFSTOUFS(mp);
3319 	if (mp->mnt_flag & MNT_RDONLY) {
3320 		vn_finished_write(mp);
3321 		fdrop(fp, td);
3322 		return (EROFS);
3323 	}
3324 	fs = ump->um_fs;
3325 	filetype = IFREG;
3326 
3327 	switch (oidp->oid_number) {
3328 	case FFS_SET_FLAGS:
3329 #ifdef DIAGNOSTIC
3330 		if (fsckcmds)
3331 			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
3332 			    cmd.size > 0 ? "set" : "clear");
3333 #endif /* DIAGNOSTIC */
3334 		if (cmd.size > 0)
3335 			fs->fs_flags |= (long)cmd.value;
3336 		else
3337 			fs->fs_flags &= ~(long)cmd.value;
3338 		break;
3339 
3340 	case FFS_ADJ_REFCNT:
3341 #ifdef DIAGNOSTIC
3342 		if (fsckcmds) {
3343 			printf("%s: adjust inode %jd link count by %jd\n",
3344 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3345 			    (intmax_t)cmd.size);
3346 		}
3347 #endif /* DIAGNOSTIC */
3348 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3349 			break;
3350 		ip = VTOI(vp);
3351 		ip->i_nlink += cmd.size;
3352 		DIP_SET_NLINK(ip, ip->i_nlink);
3353 		ip->i_effnlink += cmd.size;
3354 		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_MODIFIED);
3355 		error = ffs_update(vp, 1);
3356 		if (DOINGSOFTDEP(vp))
3357 			softdep_change_linkcnt(ip);
3358 		vput(vp);
3359 		break;
3360 
3361 	case FFS_ADJ_BLKCNT:
3362 #ifdef DIAGNOSTIC
3363 		if (fsckcmds) {
3364 			printf("%s: adjust inode %jd block count by %jd\n",
3365 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3366 			    (intmax_t)cmd.size);
3367 		}
3368 #endif /* DIAGNOSTIC */
3369 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3370 			break;
3371 		ip = VTOI(vp);
3372 		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
3373 		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_MODIFIED);
3374 		error = ffs_update(vp, 1);
3375 		vput(vp);
3376 		break;
3377 
3378 	case FFS_ADJ_DEPTH:
3379 #ifdef DIAGNOSTIC
3380 		if (fsckcmds) {
3381 			printf("%s: adjust directory inode %jd depth by %jd\n",
3382 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3383 			    (intmax_t)cmd.size);
3384 		}
3385 #endif /* DIAGNOSTIC */
3386 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3387 			break;
3388 		if (vp->v_type != VDIR) {
3389 			vput(vp);
3390 			error = ENOTDIR;
3391 			break;
3392 		}
3393 		ip = VTOI(vp);
3394 		DIP_SET(ip, i_dirdepth, DIP(ip, i_dirdepth) + cmd.size);
3395 		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_MODIFIED);
3396 		error = ffs_update(vp, 1);
3397 		vput(vp);
3398 		break;
3399 
3400 	case FFS_SET_SIZE:
3401 #ifdef DIAGNOSTIC
3402 		if (fsckcmds) {
3403 			printf("%s: set inode %jd size to %jd\n",
3404 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3405 			    (intmax_t)cmd.size);
3406 		}
3407 #endif /* DIAGNOSTIC */
3408 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3409 			break;
3410 		ip = VTOI(vp);
3411 		DIP_SET(ip, i_size, cmd.size);
3412 		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_MODIFIED);
3413 		error = ffs_update(vp, 1);
3414 		vput(vp);
3415 		break;
3416 
3417 	case FFS_DIR_FREE:
3418 		filetype = IFDIR;
3419 		/* fall through */
3420 
3421 	case FFS_FILE_FREE:
3422 #ifdef DIAGNOSTIC
3423 		if (fsckcmds) {
3424 			if (cmd.size == 1)
3425 				printf("%s: free %s inode %ju\n",
3426 				    mp->mnt_stat.f_mntonname,
3427 				    filetype == IFDIR ? "directory" : "file",
3428 				    (uintmax_t)cmd.value);
3429 			else
3430 				printf("%s: free %s inodes %ju-%ju\n",
3431 				    mp->mnt_stat.f_mntonname,
3432 				    filetype == IFDIR ? "directory" : "file",
3433 				    (uintmax_t)cmd.value,
3434 				    (uintmax_t)(cmd.value + cmd.size - 1));
3435 		}
3436 #endif /* DIAGNOSTIC */
3437 		while (cmd.size > 0) {
3438 			if ((error = ffs_freefile(ump, fs, ump->um_devvp,
3439 			    cmd.value, filetype, NULL)))
3440 				break;
3441 			cmd.size -= 1;
3442 			cmd.value += 1;
3443 		}
3444 		break;
3445 
3446 	case FFS_BLK_FREE:
3447 #ifdef DIAGNOSTIC
3448 		if (fsckcmds) {
3449 			if (cmd.size == 1)
3450 				printf("%s: free block %jd\n",
3451 				    mp->mnt_stat.f_mntonname,
3452 				    (intmax_t)cmd.value);
3453 			else
3454 				printf("%s: free blocks %jd-%jd\n",
3455 				    mp->mnt_stat.f_mntonname,
3456 				    (intmax_t)cmd.value,
3457 				    (intmax_t)cmd.value + cmd.size - 1);
3458 		}
3459 #endif /* DIAGNOSTIC */
3460 		blkno = cmd.value;
3461 		blkcnt = cmd.size;
3462 		blksize = fs->fs_frag - (blkno % fs->fs_frag);
3463 		key = ffs_blkrelease_start(ump, ump->um_devvp, UFS_ROOTINO);
3464 		while (blkcnt > 0) {
3465 			if (blkcnt < blksize)
3466 				blksize = blkcnt;
3467 			ffs_blkfree(ump, fs, ump->um_devvp, blkno,
3468 			    blksize * fs->fs_fsize, UFS_ROOTINO,
3469 			    VDIR, NULL, key);
3470 			blkno += blksize;
3471 			blkcnt -= blksize;
3472 			blksize = fs->fs_frag;
3473 		}
3474 		ffs_blkrelease_finish(ump, key);
3475 		break;
3476 
3477 	/*
3478 	 * Adjust superblock summaries.  fsck(8) is expected to
3479 	 * submit deltas when necessary.
3480 	 */
3481 	case FFS_ADJ_NDIR:
3482 #ifdef DIAGNOSTIC
3483 		if (fsckcmds) {
3484 			printf("%s: adjust number of directories by %jd\n",
3485 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3486 		}
3487 #endif /* DIAGNOSTIC */
3488 		fs->fs_cstotal.cs_ndir += cmd.value;
3489 		break;
3490 
3491 	case FFS_ADJ_NBFREE:
3492 #ifdef DIAGNOSTIC
3493 		if (fsckcmds) {
3494 			printf("%s: adjust number of free blocks by %+jd\n",
3495 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3496 		}
3497 #endif /* DIAGNOSTIC */
3498 		fs->fs_cstotal.cs_nbfree += cmd.value;
3499 		break;
3500 
3501 	case FFS_ADJ_NIFREE:
3502 #ifdef DIAGNOSTIC
3503 		if (fsckcmds) {
3504 			printf("%s: adjust number of free inodes by %+jd\n",
3505 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3506 		}
3507 #endif /* DIAGNOSTIC */
3508 		fs->fs_cstotal.cs_nifree += cmd.value;
3509 		break;
3510 
3511 	case FFS_ADJ_NFFREE:
3512 #ifdef DIAGNOSTIC
3513 		if (fsckcmds) {
3514 			printf("%s: adjust number of free frags by %+jd\n",
3515 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3516 		}
3517 #endif /* DIAGNOSTIC */
3518 		fs->fs_cstotal.cs_nffree += cmd.value;
3519 		break;
3520 
3521 	case FFS_ADJ_NUMCLUSTERS:
3522 #ifdef DIAGNOSTIC
3523 		if (fsckcmds) {
3524 			printf("%s: adjust number of free clusters by %+jd\n",
3525 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3526 		}
3527 #endif /* DIAGNOSTIC */
3528 		fs->fs_cstotal.cs_numclusters += cmd.value;
3529 		break;
3530 
3531 	case FFS_SET_CWD:
3532 #ifdef DIAGNOSTIC
3533 		if (fsckcmds) {
3534 			printf("%s: set current directory to inode %jd\n",
3535 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3536 		}
3537 #endif /* DIAGNOSTIC */
3538 		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
3539 			break;
3540 		AUDIT_ARG_VNODE1(vp);
3541 		if ((error = change_dir(vp, td)) != 0) {
3542 			vput(vp);
3543 			break;
3544 		}
3545 		VOP_UNLOCK(vp);
3546 		pwd_chdir(td, vp);
3547 		break;
3548 
3549 	case FFS_SET_DOTDOT:
3550 #ifdef DIAGNOSTIC
3551 		if (fsckcmds) {
3552 			printf("%s: change .. in cwd from %jd to %jd\n",
3553 			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3554 			    (intmax_t)cmd.size);
3555 		}
3556 #endif /* DIAGNOSTIC */
3557 		/*
3558 		 * First we have to get and lock the parent directory
3559 		 * to which ".." points.
3560 		 */
3561 		error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
3562 		if (error)
3563 			break;
3564 		/*
3565 		 * Now we get and lock the child directory containing "..".
3566 		 */
3567 		pwd = pwd_hold(td);
3568 		dvp = pwd->pwd_cdir;
3569 		if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
3570 			vput(fdvp);
3571 			pwd_drop(pwd);
3572 			break;
3573 		}
3574 		dp = VTOI(dvp);
3575 		SET_I_OFFSET(dp, 12);	/* XXX mastertemplate.dot_reclen */
3576 		error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3577 		    DT_DIR, 0);
3578 		cache_purge(fdvp);
3579 		cache_purge(dvp);
3580 		vput(dvp);
3581 		vput(fdvp);
3582 		pwd_drop(pwd);
3583 		break;
3584 
3585 	case FFS_UNLINK:
3586 #ifdef DIAGNOSTIC
3587 		if (fsckcmds) {
3588 			char buf[32];
3589 
3590 			if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL))
3591 				strncpy(buf, "Name_too_long", 32);
3592 			printf("%s: unlink %s (inode %jd)\n",
3593 			    mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3594 		}
3595 #endif /* DIAGNOSTIC */
3596 		/*
3597 		 * kern_funlinkat will do its own start/finish writes and
3598 		 * they do not nest, so drop ours here. Setting mp to NULL
3599 		 * indicates that vn_finished_write() is not needed down below.
3600 		 */
3601 		vn_finished_write(mp);
3602 		mp = NULL;
3603 		error = kern_funlinkat(td, AT_FDCWD,
3604 		    (char *)(intptr_t)cmd.value, FD_NONE, UIO_USERSPACE,
3605 		    0, (ino_t)cmd.size);
3606 		break;
3607 
3608 	default:
3609 #ifdef DIAGNOSTIC
3610 		if (fsckcmds) {
3611 			printf("Invalid request %d from fsck\n",
3612 			    oidp->oid_number);
3613 		}
3614 #endif /* DIAGNOSTIC */
3615 		error = EINVAL;
3616 		break;
3617 	}
3618 	fdrop(fp, td);
3619 	vn_finished_write(mp);
3620 	return (error);
3621 }
3622