xref: /freebsd/sys/ufs/ffs/ffs_alloc.c (revision ee2ea5ceafed78a5bd9810beb9e3ca927180c226)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
34  * $FreeBSD$
35  */
36 
37 #include "opt_quota.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bio.h>
42 #include <sys/buf.h>
43 #include <sys/conf.h>
44 #include <sys/file.h>
45 #include <sys/proc.h>
46 #include <sys/vnode.h>
47 #include <sys/mount.h>
48 #include <sys/kernel.h>
49 #include <sys/sysctl.h>
50 #include <sys/syslog.h>
51 
52 #include <ufs/ufs/extattr.h>
53 #include <ufs/ufs/quota.h>
54 #include <ufs/ufs/inode.h>
55 #include <ufs/ufs/ufs_extern.h>
56 #include <ufs/ufs/ufsmount.h>
57 
58 #include <ufs/ffs/fs.h>
59 #include <ufs/ffs/ffs_extern.h>
60 
61 typedef ufs_daddr_t allocfcn_t(struct inode *ip, int cg, ufs_daddr_t bpref,
62 				  int size);
63 
64 static ufs_daddr_t ffs_alloccg(struct inode *, int, ufs_daddr_t, int);
65 static ufs_daddr_t
66 	      ffs_alloccgblk(struct inode *, struct buf *, ufs_daddr_t);
67 #ifdef DIAGNOSTIC
68 static int	ffs_checkblk(struct inode *, ufs_daddr_t, long);
69 #endif
70 static ufs_daddr_t ffs_clusteralloc(struct inode *, int, ufs_daddr_t, int);
71 static ino_t	ffs_dirpref(struct inode *);
72 static ufs_daddr_t ffs_fragextend(struct inode *, int, long, int, int);
73 static void	ffs_fserr(struct fs *, ino_t, char *);
74 static u_long	ffs_hashalloc
75 		(struct inode *, int, long, int, allocfcn_t *);
76 static ino_t	ffs_nodealloccg(struct inode *, int, ufs_daddr_t, int);
77 static ufs_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs_daddr_t, int);
78 
79 /*
80  * Allocate a block in the file system.
81  *
82  * The size of the requested block is given, which must be some
83  * multiple of fs_fsize and <= fs_bsize.
84  * A preference may be optionally specified. If a preference is given
85  * the following hierarchy is used to allocate a block:
86  *   1) allocate the requested block.
87  *   2) allocate a rotationally optimal block in the same cylinder.
88  *   3) allocate a block in the same cylinder group.
89  *   4) quadratically rehash into other cylinder groups, until an
90  *      available block is located.
91  * If no block preference is given, the following hierarchy is used
92  * to allocate a block:
93  *   1) allocate a block in the cylinder group that contains the
94  *      inode for the file.
95  *   2) quadratically rehash into other cylinder groups, until an
96  *      available block is located.
97  */
98 int
99 ffs_alloc(ip, lbn, bpref, size, cred, bnp)
100 	register struct inode *ip;
101 	ufs_daddr_t lbn, bpref;
102 	int size;
103 	struct ucred *cred;
104 	ufs_daddr_t *bnp;
105 {
106 	register struct fs *fs;
107 	ufs_daddr_t bno;
108 	int cg, reclaimed;
109 #ifdef QUOTA
110 	int error;
111 #endif
112 
113 	*bnp = 0;
114 	fs = ip->i_fs;
115 #ifdef DIAGNOSTIC
116 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
117 		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
118 		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
119 		    fs->fs_fsmnt);
120 		panic("ffs_alloc: bad size");
121 	}
122 	if (cred == NOCRED)
123 		panic("ffs_alloc: missing credential");
124 #endif /* DIAGNOSTIC */
125 	reclaimed = 0;
126 retry:
127 	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
128 		goto nospace;
129 	if (suser_cred(cred, PRISON_ROOT) &&
130 	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
131 		goto nospace;
132 #ifdef QUOTA
133 	error = chkdq(ip, (long)btodb(size), cred, 0);
134 	if (error)
135 		return (error);
136 #endif
137 	if (bpref >= fs->fs_size)
138 		bpref = 0;
139 	if (bpref == 0)
140 		cg = ino_to_cg(fs, ip->i_number);
141 	else
142 		cg = dtog(fs, bpref);
143 	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
144 					 ffs_alloccg);
145 	if (bno > 0) {
146 		ip->i_blocks += btodb(size);
147 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
148 		*bnp = bno;
149 		return (0);
150 	}
151 #ifdef QUOTA
152 	/*
153 	 * Restore user's disk quota because allocation failed.
154 	 */
155 	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
156 #endif
157 nospace:
158 	if (fs->fs_pendingblocks > 0 && reclaimed == 0) {
159 		reclaimed = 1;
160 		softdep_request_cleanup(fs, ITOV(ip));
161 		goto retry;
162 	}
163 	ffs_fserr(fs, ip->i_number, "file system full");
164 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
165 	return (ENOSPC);
166 }
167 
168 /*
169  * Reallocate a fragment to a bigger size
170  *
171  * The number and size of the old block are given, and a preference
172  * and new size are also specified. The allocator attempts to extend
173  * the original block. Failing that, the regular block allocator is
174  * invoked to get an appropriate block.
175  */
176 int
177 ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
178 	register struct inode *ip;
179 	ufs_daddr_t lbprev;
180 	ufs_daddr_t bpref;
181 	int osize, nsize;
182 	struct ucred *cred;
183 	struct buf **bpp;
184 {
185 	struct vnode *vp;
186 	struct fs *fs;
187 	struct buf *bp;
188 	int cg, request, error, reclaimed;
189 	ufs_daddr_t bprev, bno;
190 
191 	*bpp = 0;
192 	vp = ITOV(ip);
193 	fs = ip->i_fs;
194 #ifdef DIAGNOSTIC
195 	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
196 		panic("ffs_realloccg: allocation on suspended filesystem");
197 	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
198 	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
199 		printf(
200 		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
201 		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
202 		    nsize, fs->fs_fsmnt);
203 		panic("ffs_realloccg: bad size");
204 	}
205 	if (cred == NOCRED)
206 		panic("ffs_realloccg: missing credential");
207 #endif /* DIAGNOSTIC */
208 	reclaimed = 0;
209 retry:
210 	if (suser_cred(cred, PRISON_ROOT) &&
211 	    freespace(fs, fs->fs_minfree) -  numfrags(fs, nsize - osize) < 0)
212 		goto nospace;
213 	if ((bprev = ip->i_db[lbprev]) == 0) {
214 		printf("dev = %s, bsize = %ld, bprev = %ld, fs = %s\n",
215 		    devtoname(ip->i_dev), (long)fs->fs_bsize, (long)bprev,
216 		    fs->fs_fsmnt);
217 		panic("ffs_realloccg: bad bprev");
218 	}
219 	/*
220 	 * Allocate the extra space in the buffer.
221 	 */
222 	error = bread(vp, lbprev, osize, NOCRED, &bp);
223 	if (error) {
224 		brelse(bp);
225 		return (error);
226 	}
227 
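	/*
	 * If the buffer has not yet been mapped to a disk address
	 * (b_blkno still equals its logical block number), point it
	 * at the previous allocation.
	 */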
228 	if (bp->b_blkno == bp->b_lblkno) {
229 		if (lbprev >= NDADDR)
230 			panic("ffs_realloccg: lbprev out of range");
231 		bp->b_blkno = fsbtodb(fs, bprev);
232 	}
233 
234 #ifdef QUOTA
235 	error = chkdq(ip, (long)btodb(nsize - osize), cred, 0);
236 	if (error) {
237 		brelse(bp);
238 		return (error);
239 	}
240 #endif
241 	/*
242 	 * Check for extension in the existing location.
243 	 */
244 	cg = dtog(fs, bprev);
245 	bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize);
246 	if (bno) {
247 		if (bp->b_blkno != fsbtodb(fs, bno))
248 			panic("ffs_realloccg: bad blockno");
249 		ip->i_blocks += btodb(nsize - osize);
250 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
251 		allocbuf(bp, nsize);
252 		bp->b_flags |= B_DONE;
253 		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
254 		*bpp = bp;
255 		return (0);
256 	}
257 	/*
258 	 * Allocate a new disk location.
259 	 */
260 	if (bpref >= fs->fs_size)
261 		bpref = 0;
262 	switch ((int)fs->fs_optim) {
263 	case FS_OPTSPACE:
264 		/*
265 		 * Allocate an exact sized fragment. Although this makes
266 		 * best use of space, we will waste time relocating it if
267 		 * the file continues to grow. If the fragmentation is
268 		 * less than half of the minimum free reserve, we choose
269 		 * to begin optimizing for time.
270 		 */
271 		request = nsize;
272 		if (fs->fs_minfree <= 5 ||
273 		    fs->fs_cstotal.cs_nffree >
274 		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
275 			break;
276 		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
277 			fs->fs_fsmnt);
278 		fs->fs_optim = FS_OPTTIME;
279 		break;
280 	case FS_OPTTIME:
281 		/*
282 		 * At this point we have discovered a file that is trying to
283 		 * grow a small fragment to a larger fragment. To save time,
284 		 * we allocate a full sized block, then free the unused portion.
285 		 * If the file continues to grow, the `ffs_fragextend' call
286 		 * above will be able to grow it in place without further
287 		 * copying. If aberrant programs cause disk fragmentation to
288 		 * grow within 2% of the free reserve, we choose to begin
289 		 * optimizing for space.
290 		 */
291 		request = fs->fs_bsize;
292 		if (fs->fs_cstotal.cs_nffree <
293 		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
294 			break;
295 		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
296 			fs->fs_fsmnt);
297 		fs->fs_optim = FS_OPTSPACE;
298 		break;
299 	default:
300 		printf("dev = %s, optim = %ld, fs = %s\n",
301 		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
302 		panic("ffs_realloccg: bad optim");
303 		/* NOTREACHED */
304 	}
305 	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
306 					 ffs_alloccg);
307 	if (bno > 0) {
308 		bp->b_blkno = fsbtodb(fs, bno);
309 		if (!DOINGSOFTDEP(vp))
310 			ffs_blkfree(fs, ip->i_devvp, bprev, (long)osize,
311 			    ip->i_number);
312 		if (nsize < request)
313 			ffs_blkfree(fs, ip->i_devvp, bno + numfrags(fs, nsize),
314 			    (long)(request - nsize), ip->i_number);
315 		ip->i_blocks += btodb(nsize - osize);
316 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
317 		allocbuf(bp, nsize);
318 		bp->b_flags |= B_DONE;
319 		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
320 		*bpp = bp;
321 		return (0);
322 	}
323 #ifdef QUOTA
324 	/*
325 	 * Restore user's disk quota because allocation failed.
326 	 */
327 	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
328 #endif
329 	brelse(bp);
330 nospace:
331 	/*
332 	 * no space available
333 	 */
334 	if (fs->fs_pendingblocks > 0 && reclaimed == 0) {
335 		reclaimed = 1;
336 		softdep_request_cleanup(fs, vp);
337 		goto retry;
338 	}
339 	ffs_fserr(fs, ip->i_number, "file system full");
340 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
341 	return (ENOSPC);
342 }
343 
344 /*
345  * Reallocate a sequence of blocks into a contiguous sequence of blocks.
346  *
347  * The vnode and an array of buffer pointers for a range of sequential
348  * logical blocks to be made contiguous is given. The allocator attempts
349  * to find a range of sequential blocks starting as close as possible to
350  * an fs_rotdelay offset from the end of the allocation for the logical
351  * block immediately preceding the current range. If successful, the
352  * physical block numbers in the buffer pointers and in the inode are
353  * changed to reflect the new allocation. If unsuccessful, the allocation
354  * is left unchanged. Whether the reallocation succeeded is returned.
355  * Note that the error return is not reflected back to the user. Rather,
356  * the previous block allocation will be used.
357  */
358 
359 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
360 
361 static int doasyncfree = 1;
362 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");
363 
364 static int doreallocblks = 1;
365 SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
366 
367 #ifdef DEBUG
368 static volatile int prtrealloc = 0;
369 #endif
370 
371 int
372 ffs_reallocblks(ap)
373 	struct vop_reallocblks_args /* {
374 		struct vnode *a_vp;
375 		struct cluster_save *a_buflist;
376 	} */ *ap;
377 {
378 	struct fs *fs;
379 	struct inode *ip;
380 	struct vnode *vp;
381 	struct buf *sbp, *ebp;
382 	ufs_daddr_t *bap, *sbap, *ebap = 0;
383 	struct cluster_save *buflist;
384 	ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno;
385 	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
386 	int i, len, start_lvl, end_lvl, pref, ssize;
387 
388 	if (doreallocblks == 0)
389 		return (ENOSPC);
390 	vp = ap->a_vp;
391 	ip = VTOI(vp);
392 	fs = ip->i_fs;
393 	if (fs->fs_contigsumsize <= 0)
394 		return (ENOSPC);
395 	buflist = ap->a_buflist;
396 	len = buflist->bs_nchildren;
397 	start_lbn = buflist->bs_children[0]->b_lblkno;
398 	end_lbn = start_lbn + len - 1;
399 #ifdef DIAGNOSTIC
400 	for (i = 0; i < len; i++)
401 		if (!ffs_checkblk(ip,
402 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
403 			panic("ffs_reallocblks: unallocated block 1");
404 	for (i = 1; i < len; i++)
405 		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
406 			panic("ffs_reallocblks: non-logical cluster");
407 	blkno = buflist->bs_children[0]->b_blkno;
408 	ssize = fsbtodb(fs, fs->fs_frag);
409 	for (i = 1; i < len - 1; i++)
410 		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
411 			panic("ffs_reallocblks: non-physical cluster %d", i);
412 #endif
413 	/*
414 	 * If the latest allocation is in a new cylinder group, assume that
415 	 * the filesystem has decided to move and do not force it back to
416 	 * the previous cylinder group.
417 	 */
418 	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
419 	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
420 		return (ENOSPC);
421 	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
422 	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
423 		return (ENOSPC);
424 	/*
425 	 * Get the starting offset and block map for the first block.
426 	 */
427 	if (start_lvl == 0) {
428 		sbap = &ip->i_db[0];
429 		soff = start_lbn;
430 	} else {
431 		idp = &start_ap[start_lvl - 1];
432 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
433 			brelse(sbp);
434 			return (ENOSPC);
435 		}
436 		sbap = (ufs_daddr_t *)sbp->b_data;
437 		soff = idp->in_off;
438 	}
439 	/*
440 	 * Find the preferred location for the cluster.
441 	 */
442 	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
443 	/*
444 	 * If the block range spans two block maps, get the second map.
445 	 */
446 	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
447 		ssize = len;
448 	} else {
449 #ifdef DIAGNOSTIC
450 		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
451 			panic("ffs_reallocblk: start == end");
452 #endif
453 		ssize = len - (idp->in_off + 1);
454 		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
455 			goto fail;
456 		ebap = (ufs_daddr_t *)ebp->b_data;
457 	}
458 	/*
459 	 * Search the block map looking for an allocation of the desired size.
460 	 */
461 	if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
462 	    len, ffs_clusteralloc)) == 0)
463 		goto fail;
464 	/*
465 	 * We have found a new contiguous block.
466 	 *
467 	 * First we have to replace the old block pointers with the new
468 	 * block pointers in the inode and indirect blocks associated
469 	 * with the file.
470 	 */
471 #ifdef DEBUG
472 	if (prtrealloc)
473 		printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
474 		    start_lbn, end_lbn);
475 #endif
476 	blkno = newblk;
477 	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
478 		if (i == ssize) {
479 			bap = ebap;
480 			soff = -i;
481 		}
482 #ifdef DIAGNOSTIC
483 		if (!ffs_checkblk(ip,
484 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
485 			panic("ffs_reallocblks: unallocated block 2");
486 		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
487 			panic("ffs_reallocblks: alloc mismatch");
488 #endif
489 #ifdef DEBUG
490 		if (prtrealloc)
491 			printf(" %d,", *bap);
492 #endif
493 		if (DOINGSOFTDEP(vp)) {
494 			if (sbap == &ip->i_db[0] && i < ssize)
495 				softdep_setup_allocdirect(ip, start_lbn + i,
496 				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
497 				    buflist->bs_children[i]);
498 			else
499 				softdep_setup_allocindir_page(ip, start_lbn + i,
500 				    i < ssize ? sbp : ebp, soff + i, blkno,
501 				    *bap, buflist->bs_children[i]);
502 		}
503 		*bap++ = blkno;
504 	}
505 	/*
506 	 * Next we must write out the modified inode and indirect blocks.
507 	 * For strict correctness, the writes should be synchronous since
508  * the old block values may have been written to disk. In practice
509 	 * they are almost never written, but if we are concerned about
510 	 * strict correctness, the `doasyncfree' flag should be set to zero.
511 	 *
512 	 * The test on `doasyncfree' should be changed to test a flag
513 	 * that shows whether the associated buffers and inodes have
514 	 * been written. The flag should be set when the cluster is
515 	 * started and cleared whenever the buffer or inode is flushed.
516 	 * We can then check below to see if it is set, and do the
517 	 * synchronous write only when it has been cleared.
518 	 */
519 	if (sbap != &ip->i_db[0]) {
520 		if (doasyncfree)
521 			bdwrite(sbp);
522 		else
523 			bwrite(sbp);
524 	} else {
525 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
526 		if (!doasyncfree)
527 			UFS_UPDATE(vp, 1);
528 	}
529 	if (ssize < len) {
530 		if (doasyncfree)
531 			bdwrite(ebp);
532 		else
533 			bwrite(ebp);
534 	}
535 	/*
536 	 * Last, free the old blocks and assign the new blocks to the buffers.
537 	 */
538 #ifdef DEBUG
539 	if (prtrealloc)
540 		printf("\n\tnew:");
541 #endif
542 	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
543 		if (!DOINGSOFTDEP(vp))
544 			ffs_blkfree(fs, ip->i_devvp,
545 			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
546 			    fs->fs_bsize, ip->i_number);
547 		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
548 #ifdef DIAGNOSTIC
549 		if (!ffs_checkblk(ip,
550 		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
551 			panic("ffs_reallocblks: unallocated block 3");
552 #endif
553 #ifdef DEBUG
554 		if (prtrealloc)
555 			printf(" %d,", blkno);
556 #endif
557 	}
558 #ifdef DEBUG
559 	if (prtrealloc) {
560 		prtrealloc--;
561 		printf("\n");
562 	}
563 #endif
564 	return (0);
565 
566 fail:
567 	if (ssize < len)
568 		brelse(ebp);
569 	if (sbap != &ip->i_db[0])
570 		brelse(sbp);
571 	return (ENOSPC);
572 }
573 
574 /*
575  * Allocate an inode in the file system.
576  *
577  * If allocating a directory, use ffs_dirpref to select the inode.
578  * If allocating in a directory, the following hierarchy is followed:
579  *   1) allocate the preferred inode.
580  *   2) allocate an inode in the same cylinder group.
581  *   3) quadratically rehash into other cylinder groups, until an
582  *      available inode is located.
583  * If no inode preference is given, the following hierarchy is used
584  * to allocate an inode:
585  *   1) allocate an inode in cylinder group 0.
586  *   2) quadratically rehash into other cylinder groups, until an
587  *      available inode is located.
588  */
589 int
590 ffs_valloc(pvp, mode, cred, vpp)
591 	struct vnode *pvp;
592 	int mode;
593 	struct ucred *cred;
594 	struct vnode **vpp;
595 {
596 	register struct inode *pip;
597 	register struct fs *fs;
598 	register struct inode *ip;
599 	ino_t ino, ipref;
600 	int cg, error;
601 
602 	*vpp = NULL;
603 	pip = VTOI(pvp);
604 	fs = pip->i_fs;
605 	if (fs->fs_cstotal.cs_nifree == 0)
606 		goto noinodes;
607 
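	/*
	 * Select a preferred inode: use the directory placement policy
	 * when creating a directory, otherwise prefer the cylinder
	 * group of the parent directory.
	 */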
608 	if ((mode & IFMT) == IFDIR)
609 		ipref = ffs_dirpref(pip);
610 	else
611 		ipref = pip->i_number;
612 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
613 		ipref = 0;
614 	cg = ino_to_cg(fs, ipref);
615 	/*
616 	 * Track the number of dirs created one after another
617 	 * in the same cg without intervening files.
618 	 */
619 	if ((mode & IFMT) == IFDIR) {
620 		if (fs->fs_contigdirs[cg] < 255)
621 			fs->fs_contigdirs[cg]++;
622 	} else {
623 		if (fs->fs_contigdirs[cg] > 0)
624 			fs->fs_contigdirs[cg]--;
625 	}
626 	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode,
627 					(allocfcn_t *)ffs_nodealloccg);
628 	if (ino == 0)
629 		goto noinodes;
630 	error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
631 	if (error) {
632 		UFS_VFREE(pvp, ino, mode);
633 		return (error);
634 	}
635 	ip = VTOI(*vpp);
636 	if (ip->i_mode) {
637 		printf("mode = 0%o, inum = %lu, fs = %s\n",
638 		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
639 		panic("ffs_valloc: dup alloc");
640 	}
641 	if (ip->i_blocks && (fs->fs_flags & FS_UNCLEAN) == 0) {	    /* XXX */
642 		printf("free inode %s/%lu had %ld blocks\n",
643 		    fs->fs_fsmnt, (u_long)ino, (long)ip->i_blocks);
644 		ip->i_blocks = 0;
645 	}
646 	ip->i_flags = 0;
647 	/*
648 	 * Set up a new generation number for this inode.
649 	 */
650 	if (ip->i_gen == 0 || ++ip->i_gen == 0)
651 		ip->i_gen = random() / 2 + 1;
652 	return (0);
653 noinodes:
654 	ffs_fserr(fs, pip->i_number, "out of inodes");
655 	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
656 	return (ENOSPC);
657 }
658 
659 /*
660  * Find a cylinder group to place a directory.
661  *
662  * The policy implemented by this algorithm is to allocate a
663  * directory inode in the same cylinder group as its parent
664  * directory, but also to reserve space for its file inodes
665  * and data. Restrict the number of directories which may be
666  * allocated one after another in the same cylinder group
667  * without intervening allocation of files.
668  *
669  * When allocating a first-level directory, force allocation
670  * in another cylinder group.
671  */
672 static ino_t
673 ffs_dirpref(pip)
674 	struct inode *pip;
675 {
676 	register struct fs *fs;
677 	int cg, prefcg, dirsize, cgsize;
678 	int avgifree, avgbfree, avgndir, curdirsize;
679 	int minifree, minbfree, maxndir;
680 	int mincg, minndir;
681 	int maxcontigdirs;
682 
683 	fs = pip->i_fs;
684 
685 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
686 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
687 	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
688 
689 	/*
690 	 * Force allocation in another cg if creating a first level dir.
691 	 */
692 	if (ITOV(pip)->v_flag & VROOT) {
693 		prefcg = arc4random() % fs->fs_ncg;
694 		mincg = prefcg;
695 		minndir = fs->fs_ipg;
696 		for (cg = prefcg; cg < fs->fs_ncg; cg++)
697 			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
698 			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
699 			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
700 				mincg = cg;
701 				minndir = fs->fs_cs(fs, cg).cs_ndir;
702 			}
703 		for (cg = 0; cg < prefcg; cg++)
704 			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
705 			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
706 			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
707 				mincg = cg;
708 				minndir = fs->fs_cs(fs, cg).cs_ndir;
709 			}
710 		return ((ino_t)(fs->fs_ipg * mincg));
711 	}
712 
713 	/*
714 	 * Compute the various limits used for
715 	 * optimal allocation of a directory inode.
716 	 */
717 	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
718 	minifree = avgifree - fs->fs_ipg / 4;
719 	if (minifree < 0)
720 		minifree = 0;
721 	minbfree = avgbfree - fs->fs_fpg / fs->fs_frag / 4;
722 	if (minbfree < 0)
723 		minbfree = 0;
724 	cgsize = fs->fs_fsize * fs->fs_fpg;
725 	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
726 	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
727 	if (dirsize < curdirsize)
728 		dirsize = curdirsize;
729 	maxcontigdirs = min(cgsize / dirsize, 255);
730 	if (fs->fs_avgfpdir > 0)
731 		maxcontigdirs = min(maxcontigdirs,
732 				    fs->fs_ipg / fs->fs_avgfpdir);
733 	if (maxcontigdirs == 0)
734 		maxcontigdirs = 1;
735 
736 	/*
737 	 * Limit number of dirs in one cg and reserve space for
738 	 * regular files, but only if we have no deficit in
739 	 * inodes or space.
740 	 */
741 	prefcg = ino_to_cg(fs, pip->i_number);
742 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
743 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
744 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
745 	    	    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
746 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
747 				return ((ino_t)(fs->fs_ipg * cg));
748 		}
749 	for (cg = 0; cg < prefcg; cg++)
750 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
751 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
752 	    	    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
753 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
754 				return ((ino_t)(fs->fs_ipg * cg));
755 		}
756 	/*
757 	 * This is a backstop when we have a deficit in space.
758 	 */
759 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
760 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
761 			return ((ino_t)(fs->fs_ipg * cg));
762 	for (cg = 0; cg < prefcg; cg++)
763 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
764 			break;
765 	return ((ino_t)(fs->fs_ipg * cg));
766 }
767 
768 /*
769  * Select the desired position for the next block in a file.  The file is
770  * logically divided into sections. The first section is composed of the
771  * direct blocks. Each additional section contains fs_maxbpg blocks.
772  *
773  * If no blocks have been allocated in the first section, the policy is to
774  * request a block in the same cylinder group as the inode that describes
775  * the file. If no blocks have been allocated in any other section, the
776  * policy is to place the section in a cylinder group with a greater than
777  * average number of free blocks.  An appropriate cylinder group is found
778  * by using a rotor that sweeps the cylinder groups. When a new group of
779  * blocks is needed, the sweep begins in the cylinder group following the
780  * cylinder group from which the previous allocation was made. The sweep
781  * continues until a cylinder group with greater than the average number
782  * of free blocks is found. If the allocation is for the first block in an
783  * indirect block, the information on the previous allocation is unavailable;
784  * here a best guess is made based upon the logical block number being
785  * allocated.
786  *
787  * If a section is already partially allocated, the policy is to
788  * contiguously allocate fs_maxcontig blocks.  The end of one of these
789  * contiguous blocks and the beginning of the next is physically separated
790  * so that the disk head will be in transit between them for at least
791  * fs_rotdelay milliseconds.  This is to allow time for the processor to
792  * schedule another I/O transfer.
793  */
794 ufs_daddr_t
795 ffs_blkpref(ip, lbn, indx, bap)
796 	struct inode *ip;
797 	ufs_daddr_t lbn;
798 	int indx;
799 	ufs_daddr_t *bap;
800 {
801 	register struct fs *fs;
802 	register int cg;
803 	int avgbfree, startcg;
804 	ufs_daddr_t nextblk;
805 
806 	fs = ip->i_fs;
807 	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
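		/*
		 * Start of a new section, or no previous block to key
		 * off of. Small files (still within the range covered by
		 * the first indirect block) stay in the cylinder group
		 * of their inode.
		 */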
808 		if (lbn < NDADDR + NINDIR(fs)) {
809 			cg = ino_to_cg(fs, ip->i_number);
810 			return (fs->fs_fpg * cg + fs->fs_frag);
811 		}
812 		/*
813 		 * Find a cylinder with greater than average number of
814 		 * unused data blocks.
815 		 */
816 		if (indx == 0 || bap[indx - 1] == 0)
817 			startcg =
818 			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
819 		else
820 			startcg = dtog(fs, bap[indx - 1]) + 1;
821 		startcg %= fs->fs_ncg;
822 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
823 		for (cg = startcg; cg < fs->fs_ncg; cg++)
824 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
825 				fs->fs_cgrotor = cg;
826 				return (fs->fs_fpg * cg + fs->fs_frag);
827 			}
828 		for (cg = 0; cg <= startcg; cg++)
829 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
830 				fs->fs_cgrotor = cg;
831 				return (fs->fs_fpg * cg + fs->fs_frag);
832 			}
833 		return (0);
834 	}
835 	/*
836 	 * One or more previous blocks have been laid out. If less
837 	 * than fs_maxcontig previous blocks are contiguous, the
838 	 * next block is requested contiguously; otherwise it is
839 	 * requested rotationally delayed by fs_rotdelay milliseconds.
840 	 */
841 	nextblk = bap[indx - 1] + fs->fs_frag;
842 	if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig ||
843 	    bap[indx - fs->fs_maxcontig] +
844 	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
845 		return (nextblk);
846 	/*
847 	 * Here we convert ms of delay to frags as:
848 	 * (frags) = (ms) * (rev/sec) * (sect/rev) /
849 	 *	((sect/frag) * (ms/sec))
850 	 * then round up to the next block.
851 	 */
852 	nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
853 	    (NSPF(fs) * 1000), fs->fs_frag);
854 	return (nextblk);
855 }
856 
857 /*
858  * Implement the cylinder overflow algorithm.
859  *
860  * The policy implemented by this algorithm is:
861  *   1) allocate the block in its requested cylinder group.
862  *   2) quadratically rehash on the cylinder group number.
863  *   3) brute force search for a free block.
864  */
865 /*VARARGS5*/
866 static u_long
867 ffs_hashalloc(ip, cg, pref, size, allocator)
868 	struct inode *ip;
869 	int cg;
870 	long pref;
871 	int size;	/* size for data blocks, mode for inodes */
872 	allocfcn_t *allocator;
873 {
874 	register struct fs *fs;
875 	long result;	/* XXX why not same type as we return? */
876 	int i, icg = cg;
877 
878 #ifdef DIAGNOSTIC
879 	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
880 		panic("ffs_hashalloc: allocation on suspended filesystem");
881 #endif
882 	fs = ip->i_fs;
883 	/*
884 	 * 1: preferred cylinder group
885 	 */
886 	result = (*allocator)(ip, cg, pref, size);
887 	if (result)
888 		return (result);
889 	/*
890 	 * 2: quadratic rehash
891 	 */
892 	for (i = 1; i < fs->fs_ncg; i *= 2) {
893 		cg += i;
894 		if (cg >= fs->fs_ncg)
895 			cg -= fs->fs_ncg;
896 		result = (*allocator)(ip, cg, 0, size);
897 		if (result)
898 			return (result);
899 	}
900 	/*
901 	 * 3: brute force search
902 	 * Note that we start at i == 2, since 0 was checked initially,
903 	 * and 1 is always checked in the quadratic rehash.
904 	 */
905 	cg = (icg + 2) % fs->fs_ncg;
906 	for (i = 2; i < fs->fs_ncg; i++) {
907 		result = (*allocator)(ip, cg, 0, size);
908 		if (result)
909 			return (result);
910 		cg++;
911 		if (cg == fs->fs_ncg)
912 			cg = 0;
913 	}
914 	return (0);
915 }
916 
917 /*
918  * Determine whether a fragment can be extended.
919  *
920  * Check to see if the necessary fragments are available, and
921  * if they are, allocate them.
922  */
923 static ufs_daddr_t
924 ffs_fragextend(ip, cg, bprev, osize, nsize)
925 	struct inode *ip;
926 	int cg;
927 	long bprev;
928 	int osize, nsize;
929 {
930 	register struct fs *fs;
931 	register struct cg *cgp;
932 	struct buf *bp;
933 	long bno;
934 	int frags, bbase;
935 	int i, error;
936 	u_int8_t *blksfree;
937 
938 	fs = ip->i_fs;
939 	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
940 		return (0);
941 	frags = numfrags(fs, nsize);
942 	bbase = fragnum(fs, bprev);
943 	if (bbase > fragnum(fs, (bprev + frags - 1))) {
944 		/* cannot extend across a block boundary */
945 		return (0);
946 	}
947 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
948 		(int)fs->fs_cgsize, NOCRED, &bp);
949 	if (error) {
950 		brelse(bp);
951 		return (0);
952 	}
953 	cgp = (struct cg *)bp->b_data;
954 	if (!cg_chkmagic(cgp)) {
955 		brelse(bp);
956 		return (0);
957 	}
958 	bp->b_xflags |= BX_BKGRDWRITE;
959 	cgp->cg_time = time_second;
960 	bno = dtogd(fs, bprev);
961 	blksfree = cg_blksfree(cgp);
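	/*
	 * Verify that the fragments needed to grow the old allocation
	 * are still free.
	 */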
962 	for (i = numfrags(fs, osize); i < frags; i++)
963 		if (isclr(blksfree, bno + i)) {
964 			brelse(bp);
965 			return (0);
966 		}
967 	/*
968 	 * The current fragment can be extended:
969 	 * deduct the count on the fragment being extended into,
970 	 * increase the count on the remaining fragment (if any),
971 	 * and allocate the extended piece.
972 	 */
973 	for (i = frags; i < fs->fs_frag - bbase; i++)
974 		if (isclr(blksfree, bno + i))
975 			break;
976 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
977 	if (i != frags)
978 		cgp->cg_frsum[i - frags]++;
979 	for (i = numfrags(fs, osize); i < frags; i++) {
980 		clrbit(blksfree, bno + i);
981 		cgp->cg_cs.cs_nffree--;
982 		fs->fs_cstotal.cs_nffree--;
983 		fs->fs_cs(fs, cg).cs_nffree--;
984 	}
985 	fs->fs_fmod = 1;
986 	if (DOINGSOFTDEP(ITOV(ip)))
987 		softdep_setup_blkmapdep(bp, fs, bprev);
988 	if (fs->fs_active != 0)
989 		atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
990 	bdwrite(bp);
991 	return (bprev);
992 }
993 
994 /*
995  * Determine whether a block can be allocated.
996  *
997  * Check to see if a block of the appropriate size is available,
998  * and if it is, allocate it.
999  */
1000 static ufs_daddr_t
1001 ffs_alloccg(ip, cg, bpref, size)
1002 	struct inode *ip;
1003 	int cg;
1004 	ufs_daddr_t bpref;
1005 	int size;
1006 {
1007 	register struct fs *fs;
1008 	register struct cg *cgp;
1009 	struct buf *bp;
1010 	register int i;
1011 	ufs_daddr_t bno, blkno;
1012 	int allocsiz, error, frags;
1013 	u_int8_t *blksfree;
1014 
1015 	fs = ip->i_fs;
1016 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1017 		return (0);
1018 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1019 		(int)fs->fs_cgsize, NOCRED, &bp);
1020 	if (error) {
1021 		brelse(bp);
1022 		return (0);
1023 	}
1024 	cgp = (struct cg *)bp->b_data;
1025 	if (!cg_chkmagic(cgp) ||
1026 	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
1027 		brelse(bp);
1028 		return (0);
1029 	}
1030 	bp->b_xflags |= BX_BKGRDWRITE;
1031 	cgp->cg_time = time_second;
1032 	if (size == fs->fs_bsize) {
1033 		bno = ffs_alloccgblk(ip, bp, bpref);
1034 		if (fs->fs_active != 0)
1035 			atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
1036 		bdwrite(bp);
1037 		return (bno);
1038 	}
1039 	/*
1040 	 * Check to see if any fragments are already available.
1041 	 * Allocsiz is the size which will be allocated, hacking
1042 	 * it down to a smaller size if necessary.
1043 	 */
1044 	blksfree = cg_blksfree(cgp);
1045 	frags = numfrags(fs, size);
1046 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1047 		if (cgp->cg_frsum[allocsiz] != 0)
1048 			break;
1049 	if (allocsiz == fs->fs_frag) {
1050 		/*
1051 		 * no fragments were available, so a block will be
1052 		 * allocated, and hacked up
1053 		 */
1054 		if (cgp->cg_cs.cs_nbfree == 0) {
1055 			brelse(bp);
1056 			return (0);
1057 		}
1058 		bno = ffs_alloccgblk(ip, bp, bpref);
1059 		bpref = dtogd(fs, bno);
1060 		for (i = frags; i < fs->fs_frag; i++)
1061 			setbit(blksfree, bpref + i);
1062 		i = fs->fs_frag - frags;
1063 		cgp->cg_cs.cs_nffree += i;
1064 		fs->fs_cstotal.cs_nffree += i;
1065 		fs->fs_cs(fs, cg).cs_nffree += i;
1066 		fs->fs_fmod = 1;
1067 		cgp->cg_frsum[i]++;
1068 		if (fs->fs_active != 0)
1069 			atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
1070 		bdwrite(bp);
1071 		return (bno);
1072 	}
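	/*
	 * A free fragment of at least the needed size is available;
	 * locate it and allocate the requested frags from it.
	 */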
1073 	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1074 	if (bno < 0) {
1075 		brelse(bp);
1076 		return (0);
1077 	}
1078 	for (i = 0; i < frags; i++)
1079 		clrbit(blksfree, bno + i);
1080 	cgp->cg_cs.cs_nffree -= frags;
1081 	fs->fs_cstotal.cs_nffree -= frags;
1082 	fs->fs_cs(fs, cg).cs_nffree -= frags;
1083 	fs->fs_fmod = 1;
1084 	cgp->cg_frsum[allocsiz]--;
1085 	if (frags != allocsiz)
1086 		cgp->cg_frsum[allocsiz - frags]++;
1087 	blkno = cg * fs->fs_fpg + bno;
1088 	if (DOINGSOFTDEP(ITOV(ip)))
1089 		softdep_setup_blkmapdep(bp, fs, blkno);
1090 	if (fs->fs_active != 0)
1091 		atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
1092 	bdwrite(bp);
1093 	return ((u_long)blkno);
1094 }
1095 
1096 /*
1097  * Allocate a block in a cylinder group.
1098  *
1099  * This algorithm implements the following policy:
1100  *   1) allocate the requested block.
1101  *   2) allocate a rotationally optimal block in the same cylinder.
1102  *   3) allocate the next available block on the block rotor for the
1103  *      specified cylinder group.
1104  * Note that this routine only allocates fs_bsize blocks; these
1105  * blocks may be fragmented by the routine that allocates them.
1106  */
1107 static ufs_daddr_t
1108 ffs_alloccgblk(ip, bp, bpref)
1109 	struct inode *ip;
1110 	struct buf *bp;
1111 	ufs_daddr_t bpref;
1112 {
1113 	struct fs *fs;
1114 	struct cg *cgp;
1115 	ufs_daddr_t bno, blkno;
1116 	int cylno, pos, delta;
1117 	short *cylbp;
1118 	register int i;
1119 	u_int8_t *blksfree;
1120 
1121 	fs = ip->i_fs;
1122 	cgp = (struct cg *)bp->b_data;
1123 	blksfree = cg_blksfree(cgp);
1124 	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
1125 		bpref = cgp->cg_rotor;
1126 		goto norot;
1127 	}
1128 	bpref = blknum(fs, bpref);
1129 	bpref = dtogd(fs, bpref);
1130 	/*
1131 	 * if the requested block is available, use it
1132 	 */
1133 	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bpref))) {
1134 		bno = bpref;
1135 		goto gotit;
1136 	}
1137 	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
1138 		/*
1139 		 * Block layout information is not available.
1140 		 * Leaving bpref unchanged means we take the
1141 		 * next available free block following the one
1142 		 * we just allocated. Hopefully this will at
1143 		 * least hit a track cache on drives of unknown
1144 		 * geometry (e.g. SCSI).
1145 		 */
1146 		goto norot;
1147 	}
1148 	/*
1149 	 * check for a block available on the same cylinder
1150 	 */
1151 	cylno = cbtocylno(fs, bpref);
1152 	if (cg_blktot(cgp)[cylno] == 0)
1153 		goto norot;
1154 	/*
1155 	 * check the summary information to see if a block is
1156 	 * available in the requested cylinder starting at the
1157 	 * requested rotational position and proceeding around.
1158 	 */
1159 	cylbp = cg_blks(fs, cgp, cylno);
1160 	pos = cbtorpos(fs, bpref);
1161 	for (i = pos; i < fs->fs_nrpos; i++)
1162 		if (cylbp[i] > 0)
1163 			break;
1164 	if (i == fs->fs_nrpos)
1165 		for (i = 0; i < pos; i++)
1166 			if (cylbp[i] > 0)
1167 				break;
1168 	if (cylbp[i] > 0) {
1169 		/*
1170 		 * Found a rotational position; now find the actual
1171 		 * block. It is a panic if none is actually there.
1172 		 */
1173 		pos = cylno % fs->fs_cpc;
1174 		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
1175 		if (fs_postbl(fs, pos)[i] == -1) {
1176 			printf("pos = %d, i = %d, fs = %s\n",
1177 			    pos, i, fs->fs_fsmnt);
1178 			panic("ffs_alloccgblk: cyl groups corrupted");
1179 		}
1180 		for (i = fs_postbl(fs, pos)[i];; ) {
1181 			if (ffs_isblock(fs, blksfree, bno + i)) {
1182 				bno = blkstofrags(fs, (bno + i));
1183 				goto gotit;
1184 			}
1185 			delta = fs_rotbl(fs)[i];
1186 			if (delta <= 0 ||
1187 			    delta + i > fragstoblks(fs, fs->fs_fpg))
1188 				break;
1189 			i += delta;
1190 		}
1191 		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
1192 		panic("ffs_alloccgblk: can't find blk in cyl");
1193 	}
1194 norot:
1195 	/*
1196 	 * No blocks in the requested cylinder, so take the next
1197 	 * available one in this cylinder group.
1198 	 */
1199 	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1200 	if (bno < 0)
1201 		return (0);
1202 	cgp->cg_rotor = bno;
1203 gotit:
1204 	blkno = fragstoblks(fs, bno);
1205 	ffs_clrblock(fs, blksfree, (long)blkno);
1206 	ffs_clusteracct(fs, cgp, blkno, -1);
1207 	cgp->cg_cs.cs_nbfree--;
1208 	fs->fs_cstotal.cs_nbfree--;
1209 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1210 	cylno = cbtocylno(fs, bno);
1211 	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
1212 	cg_blktot(cgp)[cylno]--;
1213 	fs->fs_fmod = 1;
1214 	blkno = cgp->cg_cgx * fs->fs_fpg + bno;
1215 	if (DOINGSOFTDEP(ITOV(ip)))
1216 		softdep_setup_blkmapdep(bp, fs, blkno);
1217 	return (blkno);
1218 }
1219 
1220 /*
1221  * Determine whether a cluster can be allocated.
1222  *
1223  * We do not currently check for optimal rotational layout if there
1224  * are multiple choices in the same cylinder group. Instead we just
1225  * take the first one that we find following bpref.
1226  */
1227 static ufs_daddr_t
1228 ffs_clusteralloc(ip, cg, bpref, len)
1229 	struct inode *ip;
1230 	int cg;
1231 	ufs_daddr_t bpref;
1232 	int len;
1233 {
1234 	register struct fs *fs;
1235 	register struct cg *cgp;
1236 	struct buf *bp;
1237 	int i, got, run, bno, bit, map;
1238 	u_char *mapp;
1239 	int32_t *lp;
1240 	u_int8_t *blksfree;
1241 
1242 	fs = ip->i_fs;
1243 	if (fs->fs_maxcluster[cg] < len)
1244 		return (0);
1245 	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1246 	    NOCRED, &bp))
1247 		goto fail;
1248 	cgp = (struct cg *)bp->b_data;
1249 	if (!cg_chkmagic(cgp))
1250 		goto fail;
1251 	bp->b_xflags |= BX_BKGRDWRITE;
1252 	/*
1253 	 * Check to see if a cluster of the needed size (or bigger) is
1254 	 * available in this cylinder group.
1255 	 */
1256 	lp = &cg_clustersum(cgp)[len];
1257 	for (i = len; i <= fs->fs_contigsumsize; i++)
1258 		if (*lp++ > 0)
1259 			break;
1260 	if (i > fs->fs_contigsumsize) {
1261 		/*
1262 		 * This is the first time looking for a cluster in this
1263 		 * cylinder group. Update the cluster summary information
1264 		 * to reflect the true maximum sized cluster so that
1265 		 * future cluster allocation requests can avoid reading
1266 		 * the cylinder group map only to find no clusters.
1267 		 */
1268 		lp = &cg_clustersum(cgp)[len - 1];
1269 		for (i = len - 1; i > 0; i--)
1270 			if (*lp-- > 0)
1271 				break;
1272 		fs->fs_maxcluster[cg] = i;
1273 		goto fail;
1274 	}
1275 	/*
1276 	 * Search the cluster map to find a big enough cluster.
1277 	 * We take the first one that we find, even if it is larger
1278 	 * than we need as we prefer to get one close to the previous
1279 	 * block allocation. We do not search before the current
1280 	 * preference point as we do not want to allocate a block
1281 	 * that is allocated before the previous one (as we will
1282 	 * then have to wait for another pass of the elevator
1283 	 * algorithm before it will be read). We prefer to fail and
1284 	 * be recalled to try an allocation in the next cylinder group.
1285 	 */
1286 	if (dtog(fs, bpref) != cg)
1287 		bpref = 0;
1288 	else
1289 		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
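	/*
	 * Scan the cluster map bit by bit, tracking the length of the
	 * current run of free blocks until a run of the needed length
	 * is found.
	 */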
1290 	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1291 	map = *mapp++;
1292 	bit = 1 << (bpref % NBBY);
1293 	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1294 		if ((map & bit) == 0) {
1295 			run = 0;
1296 		} else {
1297 			run++;
1298 			if (run == len)
1299 				break;
1300 		}
1301 		if ((got & (NBBY - 1)) != (NBBY - 1)) {
1302 			bit <<= 1;
1303 		} else {
1304 			map = *mapp++;
1305 			bit = 1;
1306 		}
1307 	}
1308 	if (got >= cgp->cg_nclusterblks)
1309 		goto fail;
1310 	/*
1311 	 * Allocate the cluster that we have found.
1312 	 */
1313 	blksfree = cg_blksfree(cgp);
1314 	for (i = 1; i <= len; i++)
1315 		if (!ffs_isblock(fs, blksfree, got - run + i))
1316 			panic("ffs_clusteralloc: map mismatch");
1317 	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
1318 	if (dtog(fs, bno) != cg)
1319 		panic("ffs_clusteralloc: allocated out of group");
1320 	len = blkstofrags(fs, len);
1321 	for (i = 0; i < len; i += fs->fs_frag)
1322 		if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i)
1323 			panic("ffs_clusteralloc: lost block");
1324 	if (fs->fs_active != 0)
1325 		atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
1326 	bdwrite(bp);
1327 	return (bno);
1328 
1329 fail:
1330 	brelse(bp);
1331 	return (0);
1332 }
1333 
1334 /*
1335  * Determine whether an inode can be allocated.
1336  *
1337  * Check to see if an inode is available, and if it is,
1338  * allocate it using the following policy:
1339  *   1) allocate the requested inode.
1340  *   2) allocate the next available inode after the requested
1341  *      inode in the specified cylinder group.
1342  */
1343 static ino_t
1344 ffs_nodealloccg(ip, cg, ipref, mode)
1345 	struct inode *ip;
1346 	int cg;
1347 	ufs_daddr_t ipref;
1348 	int mode;
1349 {
1350 	register struct fs *fs;
1351 	register struct cg *cgp;
1352 	struct buf *bp;
1353 	u_int8_t *inosused;
1354 	int error, start, len, loc, map, i;
1355 
1356 	fs = ip->i_fs;
1357 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
1358 		return (0);
1359 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1360 		(int)fs->fs_cgsize, NOCRED, &bp);
1361 	if (error) {
1362 		brelse(bp);
1363 		return (0);
1364 	}
1365 	cgp = (struct cg *)bp->b_data;
1366 	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
1367 		brelse(bp);
1368 		return (0);
1369 	}
1370 	bp->b_xflags |= BX_BKGRDWRITE;
1371 	cgp->cg_time = time_second;
1372 	inosused = cg_inosused(cgp);
1373 	if (ipref) {
1374 		ipref %= fs->fs_ipg;
1375 		if (isclr(inosused, ipref))
1376 			goto gotit;
1377 	}
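	/*
	 * No usable preference; scan forward from the inode rotor for
	 * a byte with a free bit, wrapping to the start of the map if
	 * necessary.
	 */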
1378 	start = cgp->cg_irotor / NBBY;
1379 	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
1380 	loc = skpc(0xff, len, &inosused[start]);
1381 	if (loc == 0) {
1382 		len = start + 1;
1383 		start = 0;
1384 		loc = skpc(0xff, len, &inosused[0]);
1385 		if (loc == 0) {
1386 			printf("cg = %d, irotor = %ld, fs = %s\n",
1387 			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
1388 			panic("ffs_nodealloccg: map corrupted");
1389 			/* NOTREACHED */
1390 		}
1391 	}
1392 	i = start + len - loc;
1393 	map = inosused[i];
1394 	ipref = i * NBBY;
1395 	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
1396 		if ((map & i) == 0) {
1397 			cgp->cg_irotor = ipref;
1398 			goto gotit;
1399 		}
1400 	}
1401 	printf("fs = %s\n", fs->fs_fsmnt);
1402 	panic("ffs_nodealloccg: block not in map");
1403 	/* NOTREACHED */
1404 gotit:
1405 	if (DOINGSOFTDEP(ITOV(ip)))
1406 		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
1407 	setbit(inosused, ipref);
1408 	cgp->cg_cs.cs_nifree--;
1409 	fs->fs_cstotal.cs_nifree--;
1410 	fs->fs_cs(fs, cg).cs_nifree--;
1411 	fs->fs_fmod = 1;
1412 	if ((mode & IFMT) == IFDIR) {
1413 		cgp->cg_cs.cs_ndir++;
1414 		fs->fs_cstotal.cs_ndir++;
1415 		fs->fs_cs(fs, cg).cs_ndir++;
1416 	}
1417 	if (fs->fs_active != 0)
1418 		atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
1419 	bdwrite(bp);
1420 	return (cg * fs->fs_ipg + ipref);
1421 }
1422 
1423 /*
1424  * Free a block or fragment.
1425  *
1426  * The specified block or fragment is placed back in the
1427  * free map. If a fragment is deallocated, a check is made
1428  * to see whether a full block can be reassembled.
1429  */
1430 void
1431 ffs_blkfree(fs, devvp, bno, size, inum)
1432 	struct fs *fs;
1433 	struct vnode *devvp;
1434 	ufs_daddr_t bno;
1435 	long size;
1436 	ino_t inum;
1437 {
1438 	struct cg *cgp;
1439 	struct buf *bp;
1440 	ufs_daddr_t fragno, cgbno;
1441 	int i, error, cg, blk, frags, bbase;
1442 	u_int8_t *blksfree;
1443 	dev_t dev;
1444 
1445 	cg = dtog(fs, bno);
1446 	if (devvp->v_type != VCHR) {
1447 		/* devvp is a snapshot */
1448 		dev = VTOI(devvp)->i_devvp->v_rdev;
1449 		cgbno = fragstoblks(fs, cgtod(fs, cg));
1450 	} else {
1451 		/* devvp is a normal disk device */
1452 		dev = devvp->v_rdev;
1453 		cgbno = fsbtodb(fs, cgtod(fs, cg));
1454 		if ((devvp->v_flag & VCOPYONWRITE) &&
1455 		    ffs_snapblkfree(fs, devvp, bno, size, inum))
1456 			return;
1457 		VOP_FREEBLKS(devvp, fsbtodb(fs, bno), size);
1458 	}
1459 #ifdef DIAGNOSTIC
1460 	if (dev->si_mountpoint &&
1461 	    (dev->si_mountpoint->mnt_kern_flag & MNTK_SUSPENDED))
1462 		panic("ffs_blkfree: deallocation on suspended filesystem");
1463 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
1464 	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
1465 		printf("dev=%s, bno = %ld, bsize = %ld, size = %ld, fs = %s\n",
1466 		    devtoname(dev), (long)bno, (long)fs->fs_bsize,
1467 		    size, fs->fs_fsmnt);
1468 		panic("ffs_blkfree: bad size");
1469 	}
1470 #endif
1471 	if ((u_int)bno >= fs->fs_size) {
1472 		printf("bad block %ld, ino %lu\n", (long)bno, (u_long)inum);
1473 		ffs_fserr(fs, inum, "bad block");
1474 		return;
1475 	}
1476 	if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
1477 		brelse(bp);
1478 		return;
1479 	}
1480 	cgp = (struct cg *)bp->b_data;
1481 	if (!cg_chkmagic(cgp)) {
1482 		brelse(bp);
1483 		return;
1484 	}
1485 	bp->b_xflags |= BX_BKGRDWRITE;
1486 	cgp->cg_time = time_second;
1487 	cgbno = dtogd(fs, bno);
1488 	blksfree = cg_blksfree(cgp);
1489 	if (size == fs->fs_bsize) {
1490 		fragno = fragstoblks(fs, cgbno);
1491 		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
1492 			if (devvp->v_type != VCHR) {
1493 				/* devvp is a snapshot */
1494 				brelse(bp);
1495 				return;
1496 			}
1497 			printf("dev = %s, block = %ld, fs = %s\n",
1498 			    devtoname(dev), (long)bno, fs->fs_fsmnt);
1499 			panic("ffs_blkfree: freeing free block");
1500 		}
1501 		ffs_setblock(fs, blksfree, fragno);
1502 		ffs_clusteracct(fs, cgp, fragno, 1);
1503 		cgp->cg_cs.cs_nbfree++;
1504 		fs->fs_cstotal.cs_nbfree++;
1505 		fs->fs_cs(fs, cg).cs_nbfree++;
1506 		i = cbtocylno(fs, cgbno);
1507 		cg_blks(fs, cgp, i)[cbtorpos(fs, cgbno)]++;
1508 		cg_blktot(cgp)[i]++;
1509 	} else {
1510 		bbase = cgbno - fragnum(fs, cgbno);
1511 		/*
1512 		 * decrement the counts associated with the old frags
1513 		 */
1514 		blk = blkmap(fs, blksfree, bbase);
1515 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
1516 		/*
1517 		 * deallocate the fragment
1518 		 */
1519 		frags = numfrags(fs, size);
1520 		for (i = 0; i < frags; i++) {
1521 			if (isset(blksfree, cgbno + i)) {
1522 				printf("dev = %s, block = %ld, fs = %s\n",
1523 				    devtoname(dev), (long)(bno + i),
1524 				    fs->fs_fsmnt);
1525 				panic("ffs_blkfree: freeing free frag");
1526 			}
1527 			setbit(blksfree, cgbno + i);
1528 		}
1529 		cgp->cg_cs.cs_nffree += i;
1530 		fs->fs_cstotal.cs_nffree += i;
1531 		fs->fs_cs(fs, cg).cs_nffree += i;
1532 		/*
1533 		 * add back in counts associated with the new frags
1534 		 */
1535 		blk = blkmap(fs, blksfree, bbase);
1536 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
1537 		/*
1538 		 * if a complete block has been reassembled, account for it
1539 		 */
1540 		fragno = fragstoblks(fs, bbase);
1541 		if (ffs_isblock(fs, blksfree, fragno)) {
1542 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
1543 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
1544 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
1545 			ffs_clusteracct(fs, cgp, fragno, 1);
1546 			cgp->cg_cs.cs_nbfree++;
1547 			fs->fs_cstotal.cs_nbfree++;
1548 			fs->fs_cs(fs, cg).cs_nbfree++;
1549 			i = cbtocylno(fs, bbase);
1550 			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
1551 			cg_blktot(cgp)[i]++;
1552 		}
1553 	}
1554 	fs->fs_fmod = 1;
1555 	if (fs->fs_active != 0)
1556 		atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
1557 	bdwrite(bp);
1558 }
1559 
1560 #ifdef DIAGNOSTIC
1561 /*
1562  * Verify allocation of a block or fragment. Returns true if block or
1563  * fragment is allocated, false if it is free.
1564  */
1565 static int
1566 ffs_checkblk(ip, bno, size)
1567 	struct inode *ip;
1568 	ufs_daddr_t bno;
1569 	long size;
1570 {
1571 	struct fs *fs;
1572 	struct cg *cgp;
1573 	struct buf *bp;
1574 	int i, error, frags, free;
1575 	u_int8_t *blksfree;
1576 
1577 	fs = ip->i_fs;
1578 	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
1579 		printf("bsize = %ld, size = %ld, fs = %s\n",
1580 		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
1581 		panic("ffs_checkblk: bad size");
1582 	}
1583 	if ((u_int)bno >= fs->fs_size)
1584 		panic("ffs_checkblk: bad block %d", bno);
1585 	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
1586 		(int)fs->fs_cgsize, NOCRED, &bp);
1587 	if (error)
1588 		panic("ffs_checkblk: cg bread failed");
1589 	cgp = (struct cg *)bp->b_data;
1590 	if (!cg_chkmagic(cgp))
1591 		panic("ffs_checkblk: cg magic mismatch");
1592 	bp->b_xflags |= BX_BKGRDWRITE;
1593 	blksfree = cg_blksfree(cgp);
1594 	bno = dtogd(fs, bno);
1595 	if (size == fs->fs_bsize) {
1596 		free = ffs_isblock(fs, blksfree, fragstoblks(fs, bno));
1597 	} else {
1598 		frags = numfrags(fs, size);
1599 		for (free = 0, i = 0; i < frags; i++)
1600 			if (isset(blksfree, bno + i))
1601 				free++;
1602 		if (free != 0 && free != frags)
1603 			panic("ffs_checkblk: partially free fragment");
1604 	}
1605 	brelse(bp);
1606 	return (!free);
1607 }
1608 #endif /* DIAGNOSTIC */
1609 
1610 /*
1611  * Free an inode.
1612  */
1613 int
1614 ffs_vfree(pvp, ino, mode)
1615 	struct vnode *pvp;
1616 	ino_t ino;
1617 	int mode;
1618 {
1619 	if (DOINGSOFTDEP(pvp)) {
1620 		softdep_freefile(pvp, ino, mode);
1621 		return (0);
1622 	}
1623 	return (ffs_freefile(VTOI(pvp)->i_fs, VTOI(pvp)->i_devvp, ino, mode));
1624 }
1625 
1626 /*
1627  * Do the actual free operation.
1628  * The specified inode is placed back in the free map.
1629  */
1630 int
1631 ffs_freefile(fs, devvp, ino, mode)
1632 	struct fs *fs;
1633 	struct vnode *devvp;
1634 	ino_t ino;
1635 	int mode;
1636 {
1637 	struct cg *cgp;
1638 	struct buf *bp;
1639 	int error, cgbno, cg;
1640 	u_int8_t *inosused;
1641 	dev_t dev;
1642 
1643 	cg = ino_to_cg(fs, ino);
1644 	if (devvp->v_type != VCHR) {
1645 		/* devvp is a snapshot */
1646 		dev = VTOI(devvp)->i_devvp->v_rdev;
1647 		cgbno = fragstoblks(fs, cgtod(fs, cg));
1648 	} else {
1649 		/* devvp is a normal disk device */
1650 		dev = devvp->v_rdev;
1651 		cgbno = fsbtodb(fs, cgtod(fs, cg));
1652 	}
1653 	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
1654 		panic("ffs_vfree: range: dev = %s, ino = %d, fs = %s",
1655 		    devtoname(dev), ino, fs->fs_fsmnt);
1656 	if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
1657 		brelse(bp);
1658 		return (error);
1659 	}
1660 	cgp = (struct cg *)bp->b_data;
1661 	if (!cg_chkmagic(cgp)) {
1662 		brelse(bp);
1663 		return (0);
1664 	}
1665 	bp->b_xflags |= BX_BKGRDWRITE;
1666 	cgp->cg_time = time_second;
1667 	inosused = cg_inosused(cgp);
1668 	ino %= fs->fs_ipg;
1669 	if (isclr(inosused, ino)) {
1670 		printf("dev = %s, ino = %lu, fs = %s\n", devtoname(dev),
1671 		    (u_long)ino + cg * fs->fs_ipg, fs->fs_fsmnt);
1672 		if (fs->fs_ronly == 0)
1673 			panic("ffs_vfree: freeing free inode");
1674 	}
1675 	clrbit(inosused, ino);
1676 	if (ino < cgp->cg_irotor)
1677 		cgp->cg_irotor = ino;
1678 	cgp->cg_cs.cs_nifree++;
1679 	fs->fs_cstotal.cs_nifree++;
1680 	fs->fs_cs(fs, cg).cs_nifree++;
1681 	if ((mode & IFMT) == IFDIR) {
1682 		cgp->cg_cs.cs_ndir--;
1683 		fs->fs_cstotal.cs_ndir--;
1684 		fs->fs_cs(fs, cg).cs_ndir--;
1685 	}
1686 	fs->fs_fmod = 1;
1687 	if (fs->fs_active != 0)
1688 		atomic_clear_int(&ACTIVECGNUM(fs, cg), ACTIVECGOFF(cg));
1689 	bdwrite(bp);
1690 	return (0);
1691 }
1692 
1693 /*
1694  * Find a block of the specified size in the specified cylinder group.
1695  *
1696  * It is a panic if a request is made to find a block if none are
1697  * available.
1698  */
1699 static ufs_daddr_t
1700 ffs_mapsearch(fs, cgp, bpref, allocsiz)
1701 	register struct fs *fs;
1702 	register struct cg *cgp;
1703 	ufs_daddr_t bpref;
1704 	int allocsiz;
1705 {
1706 	ufs_daddr_t bno;
1707 	int start, len, loc, i;
1708 	int blk, field, subfield, pos;
1709 	u_int8_t *blksfree;
1710 
1711 	/*
1712 	 * find the fragment by searching through the free block
1713 	 * map for an appropriate bit pattern
1714 	 */
1715 	if (bpref)
1716 		start = dtogd(fs, bpref) / NBBY;
1717 	else
1718 		start = cgp->cg_frotor / NBBY;
1719 	blksfree = cg_blksfree(cgp);
1720 	len = howmany(fs->fs_fpg, NBBY) - start;
1721 	loc = scanc((u_int)len, (u_char *)&blksfree[start],
1722 		(u_char *)fragtbl[fs->fs_frag],
1723 		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
1724 	if (loc == 0) {
1725 		len = start + 1;
1726 		start = 0;
1727 		loc = scanc((u_int)len, (u_char *)&blksfree[0],
1728 			(u_char *)fragtbl[fs->fs_frag],
1729 			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
1730 		if (loc == 0) {
1731 			printf("start = %d, len = %d, fs = %s\n",
1732 			    start, len, fs->fs_fsmnt);
1733 			panic("ffs_alloccg: map corrupted");
1734 			/* NOTREACHED */
1735 		}
1736 	}
1737 	bno = (start + len - loc) * NBBY;
1738 	cgp->cg_frotor = bno;
1739 	/*
1740 	 * found the byte in the map
1741 	 * sift through the bits to find the selected frag
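	 * (blkmap() extracts this block's fs_frag free bits; shifting left
	 * one position adds an allocated guard bit below the block so that
	 * the around[]/inside[] templates, slid across the block one frag
	 * at a time, also match runs starting at the block boundary; a
	 * match at offset pos means the run begins at bno + pos)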
1742 	 */
1743 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
1744 		blk = blkmap(fs, blksfree, bno);
1745 		blk <<= 1;
1746 		field = around[allocsiz];
1747 		subfield = inside[allocsiz];
1748 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
1749 			if ((blk & field) == subfield)
1750 				return (bno + pos);
1751 			field <<= 1;
1752 			subfield <<= 1;
1753 		}
1754 	}
1755 	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
1756 	panic("ffs_alloccg: block not in map");
1757 	return (-1);
1758 }
1759 
1760 /*
1761  * Update the cluster map because of an allocation or free.
1762  *
1763  * Cnt == 1 means free; cnt == -1 means allocating.
1764  */
1765 void
1766 ffs_clusteracct(fs, cgp, blkno, cnt)
1767 	struct fs *fs;
1768 	struct cg *cgp;
1769 	ufs_daddr_t blkno;
1770 	int cnt;
1771 {
1772 	int32_t *sump;
1773 	int32_t *lp;
1774 	u_char *freemapp, *mapp;
1775 	int i, start, end, forw, back, map, bit;
1776 
1777 	if (fs->fs_contigsumsize <= 0)
1778 		return;
1779 	freemapp = cg_clustersfree(cgp);
1780 	sump = cg_clustersum(cgp);
1781 	/*
1782 	 * Mark the block itself free (set) or in use (clear) in the cluster map.
1783 	 */
1784 	if (cnt > 0)
1785 		setbit(freemapp, blkno);
1786 	else
1787 		clrbit(freemapp, blkno);
1788 	/*
1789 	 * Find the size of the cluster going forward.
1790 	 */
1791 	start = blkno + 1;
1792 	end = start + fs->fs_contigsumsize;
1793 	if (end >= cgp->cg_nclusterblks)
1794 		end = cgp->cg_nclusterblks;
1795 	mapp = &freemapp[start / NBBY];
1796 	map = *mapp++;
1797 	bit = 1 << (start % NBBY);
1798 	for (i = start; i < end; i++) {
1799 		if ((map & bit) == 0)
1800 			break;
1801 		if ((i & (NBBY - 1)) != (NBBY - 1)) {
1802 			bit <<= 1;
1803 		} else {
1804 			map = *mapp++;
1805 			bit = 1;
1806 		}
1807 	}
1808 	forw = i - start;
1809 	/*
1810 	 * Find the size of the cluster going backward.
1811 	 */
1812 	start = blkno - 1;
1813 	end = start - fs->fs_contigsumsize;
1814 	if (end < 0)
1815 		end = -1;
1816 	mapp = &freemapp[start / NBBY];
1817 	map = *mapp--;
1818 	bit = 1 << (start % NBBY);
1819 	for (i = start; i > end; i--) {
1820 		if ((map & bit) == 0)
1821 			break;
1822 		if ((i & (NBBY - 1)) != 0) {
1823 			bit >>= 1;
1824 		} else {
1825 			map = *mapp--;
1826 			bit = 1 << (NBBY - 1);
1827 		}
1828 	}
1829 	back = start - i;
1830 	/*
1831 	 * Account for old cluster and the possibly new forward and
1832 	 * back clusters.
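	 * For example, when freeing (cnt == 1) with forw == 2 and back == 1,
	 * the three pieces merge into a single 4-block cluster: sump[4]
	 * (capped at fs_contigsumsize) is incremented while the old entries
	 * sump[2] and sump[1] are decremented.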
1833 	 */
1834 	i = back + forw + 1;
1835 	if (i > fs->fs_contigsumsize)
1836 		i = fs->fs_contigsumsize;
1837 	sump[i] += cnt;
1838 	if (back > 0)
1839 		sump[back] -= cnt;
1840 	if (forw > 0)
1841 		sump[forw] -= cnt;
1842 	/*
1843 	 * Update cluster summary information.
1844 	 */
1845 	lp = &sump[fs->fs_contigsumsize];
1846 	for (i = fs->fs_contigsumsize; i > 0; i--)
1847 		if (*lp-- > 0)
1848 			break;
1849 	fs->fs_maxcluster[cgp->cg_cgx] = i;
1850 }
1851 
1852 /*
1853  * Fserr prints the name of a file system with an error diagnostic.
1854  *
1855  * The form of the error message is:
1856  *	fs: error message
1857  */
1858 static void
1859 ffs_fserr(fs, inum, cp)
1860 	struct fs *fs;
1861 	ino_t inum;
1862 	char *cp;
1863 {
1864 	struct proc *p = curproc;	/* XXX */
1865 
1866 	log(LOG_ERR, "pid %d (%s), uid %d inumber %d on %s: %s\n",
1867 	    p ? p->p_pid : -1, p ? p->p_comm : "-",
1868 	    p ? p->p_ucred->cr_uid : 0, inum, fs->fs_fsmnt, cp);
1869 }
1870 
1871 /*
1872  * This function provides the capability for the fsck program to
1873  * update an active filesystem. Six operations are provided:
1874  *
1875  * adjrefcnt(inode, amt) - adjusts the reference count on the
1876  *	specified inode by the specified amount. Under normal
1877  *	operation the count should always go down. Decrementing
1878  *	the count to zero will cause the inode to be freed.
1879  * adjblkcnt(inode, amt) - adjust the number of blocks used to
1880  *	by the specifed amount.
1881  * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
1882  *	are marked as free. Inodes should never have to be marked
1883  *	as in use.
1884  * freefiles(inode, count) - file inodes [inode..inode + count - 1]
1885  *	are marked as free. Inodes should never have to be marked
1886  *	as in use.
1887  * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
1888  *	are marked as free. Blocks should never have to be marked
1889  *	as in use.
1890  * setflags(flags, set/clear) - the fs_flags field has the specified
1891  *	flags set (second parameter +1) or cleared (second parameter -1).
1892  */
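
/*
 * A minimal sketch of how a userland fsck might drive one of these
 * operations (illustrative only; not taken from the actual fsck_ffs
 * sources).  The descriptor passed in cmd.handle must reference a file
 * that resides on the filesystem being repaired:
 *
 *	struct fsck_cmd cmd;
 *
 *	cmd.version = FFS_CMD_VERSION;
 *	cmd.handle = fd;		(open file on the target filesystem)
 *	cmd.value = inumber;		(inode whose link count is adjusted)
 *	cmd.size = -1;			(decrement the count by one)
 *	if (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL, &cmd,
 *	    sizeof(cmd)) == -1)
 *		warn("adjrefcnt");
 */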
1893 
1894 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
1895 
1896 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
1897 	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
1898 
1899 SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
1900 	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
1901 
1902 SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
1903 	sysctl_ffs_fsck, "Free Range of Directory Inodes");
1904 
1905 SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
1906 	sysctl_ffs_fsck, "Free Range of File Inodes");
1907 
1908 SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
1909 	sysctl_ffs_fsck, "Free Range of Blocks");
1910 
1911 SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
1912 	sysctl_ffs_fsck, "Change Filesystem Flags");
1913 
1914 #ifdef DEBUG
1915 static int fsckcmds = 0;
1916 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
1917 #endif /* DEBUG */
1918 
1919 static int
1920 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
1921 {
1922 	struct fsck_cmd cmd;
1923 	struct ufsmount *ump;
1924 	struct vnode *vp;
1925 	struct inode *ip;
1926 	struct mount *mp;
1927 	struct fs *fs;
1928 	ufs_daddr_t blkno;
1929 	long blkcnt, blksize;
1930 	struct file *fp;
1931 	int filetype, error;
1932 
1933 	if (req->newlen > sizeof cmd)
1934 		return (EBADRPC);
1935 	if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
1936 		return (error);
1937 	if (cmd.version != FFS_CMD_VERSION)
1938 		return (ERPCMISMATCH);
1939 	if ((error = getvnode(curproc->p_fd, cmd.handle, &fp)) != 0)
1940 		return (error);
1941 	vn_start_write((struct vnode *)fp->f_data, &mp, V_WAIT);
1942 	if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
1943 		vn_finished_write(mp);
1944 		fdrop(fp, curthread);
1945 		return (EINVAL);
1946 	}
1947 	if (mp->mnt_flag & MNT_RDONLY) {
1948 		vn_finished_write(mp);
1949 		fdrop(fp, curthread);
1950 		return (EROFS);
1951 	}
1952 	ump = VFSTOUFS(mp);
1953 	fs = ump->um_fs;
1954 	filetype = IFREG;
1955 
1956 	switch (oidp->oid_number) {
1957 
1958 	case FFS_SET_FLAGS:
1959 #ifdef DEBUG
1960 		if (fsckcmds)
1961 			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
1962 			    cmd.size > 0 ? "set" : "clear");
1963 #endif /* DEBUG */
1964 		if (cmd.size > 0)
1965 			fs->fs_flags |= (long)cmd.value;
1966 		else
1967 			fs->fs_flags &= ~(long)cmd.value;
1968 		break;
1969 
1970 	case FFS_ADJ_REFCNT:
1971 #ifdef DEBUG
1972 		if (fsckcmds) {
1973 			printf("%s: adjust inode %d count by %ld\n",
1974 			    mp->mnt_stat.f_mntonname, (ino_t)cmd.value,
1975 			    cmd.size);
1976 		}
1977 #endif /* DEBUG */
1978 		if ((error = VFS_VGET(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
1979 			break;
1980 		ip = VTOI(vp);
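		/*
		 * Adjust both the on-disk link count and the effective
		 * link count (the value net of operations still pending
		 * under soft updates) so the two stay in agreement.
		 */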
1981 		ip->i_nlink += cmd.size;
1982 		ip->i_effnlink += cmd.size;
1983 		ip->i_flag |= IN_CHANGE;
1984 		if (DOINGSOFTDEP(vp))
1985 			softdep_change_linkcnt(ip);
1986 		vput(vp);
1987 		break;
1988 
1989 	case FFS_ADJ_BLKCNT:
1990 #ifdef DEBUG
1991 		if (fsckcmds) {
1992 			printf("%s: adjust inode %d block count by %ld\n",
1993 			    mp->mnt_stat.f_mntonname, (ino_t)cmd.value,
1994 			    cmd.size);
1995 		}
1996 #endif /* DEBUG */
1997 		if ((error = VFS_VGET(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
1998 			break;
1999 		ip = VTOI(vp);
2000 		ip->i_blocks += cmd.size;
2001 		ip->i_flag |= IN_CHANGE;
2002 		vput(vp);
2003 		break;
2004 
2005 	case FFS_DIR_FREE:
2006 		filetype = IFDIR;
2007 		/* fall through */
2008 
2009 	case FFS_FILE_FREE:
2010 #ifdef DEBUG
2011 		if (fsckcmds) {
2012 			if (cmd.size == 1)
2013 				printf("%s: free %s inode %d\n",
2014 				    mp->mnt_stat.f_mntonname,
2015 				    filetype == IFDIR ? "directory" : "file",
2016 				    (ino_t)cmd.value);
2017 			else
2018 				printf("%s: free %s inodes %d-%d\n",
2019 				    mp->mnt_stat.f_mntonname,
2020 				    filetype == IFDIR ? "directory" : "file",
2021 				    (ino_t)cmd.value,
2022 				    (ino_t)(cmd.value + cmd.size - 1));
2023 		}
2024 #endif /* DEBUG */
2025 		while (cmd.size > 0) {
2026 			if ((error = ffs_freefile(fs, ump->um_devvp, cmd.value,
2027 			    filetype)))
2028 				break;
2029 			cmd.size -= 1;
2030 			cmd.value += 1;
2031 		}
2032 		break;
2033 
2034 	case FFS_BLK_FREE:
2035 #ifdef DEBUG
2036 		if (fsckcmds) {
2037 			if (cmd.size == 1)
2038 				printf("%s: free block %d\n",
2039 				    mp->mnt_stat.f_mntonname,
2040 				    (ufs_daddr_t)cmd.value);
2041 			else
2042 				printf("%s: free blocks %d-%ld\n",
2043 				    mp->mnt_stat.f_mntonname,
2044 				    (ufs_daddr_t)cmd.value,
2045 				    (ufs_daddr_t)cmd.value + cmd.size - 1);
2046 		}
2047 #endif /* DEBUG */
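		/*
		 * Trim the first chunk so that it ends on a block boundary;
		 * every following chunk is then a whole block, since
		 * ffs_blkfree() cannot free a range that spans blocks.
		 */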
2048 		blkno = (ufs_daddr_t)cmd.value;
2049 		blkcnt = cmd.size;
2050 		blksize = fs->fs_frag - (blkno % fs->fs_frag);
2051 		while (blkcnt > 0) {
2052 			if (blksize > blkcnt)
2053 				blksize = blkcnt;
2054 			ffs_blkfree(fs, ump->um_devvp, blkno,
2055 			    blksize * fs->fs_fsize, ROOTINO);
2056 			blkno += blksize;
2057 			blkcnt -= blksize;
2058 			blksize = fs->fs_frag;
2059 		}
2060 		break;
2061 
2062 	default:
2063 #ifdef DEBUG
2064 		if (fsckcmds) {
2065 			printf("Invalid request %d from fsck\n",
2066 			    oidp->oid_number);
2067 		}
2068 #endif /* DEBUG */
2069 		error = EINVAL;
2070 		break;
2071 
2072 	}
2073 	fdrop(fp, curthread);
2074 	vn_finished_write(mp);
2075 	return (error);
2076 }
2077