/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>
#include <stdint.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct m_buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, u_int, daddr_t, int,
		     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be a
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may optionally be specified.  If a preference is given,
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given, the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
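
/*
 * Example (illustrative sketch only, not part of the allocator):
 * requesting one full block for inode `ip' with no block preference.
 * The returned `bno' is a fragment number within the file system.
 *
 *	daddr_t bno;
 *
 *	if (ffs_alloc(ip, 0, 0, ip->i_fs->fs_bsize, &bno) != 0)
 *		errx(1, "ffs_alloc: out of space");
 */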

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return (ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag);
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return (ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag);
}
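
/*
 * Worked example (assumed geometry, for illustration only): with
 * fs_fpg == 4096 and an inode living in cylinder group 3, the first
 * block of a small file is steered to 4096 * 3 + fs_frag, i.e. just
 * past the metadata at the front of cg 3.  Once bap[indx - 1] is
 * populated, the preference is simply the previous block plus
 * fs_frag, which keeps the file contiguous.
 */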

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	u_int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
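
/*
 * Probe-order example (illustrative): with fs_ncg == 16 and a
 * preferred cg of 5, the quadratic rehash visits cgs 6, 8, 12 and
 * 4 (12 + 8 wraps mod 16).  If all of those fail, the brute force
 * pass starts at cg (5 + 2) % 16 == 7 and walks the remaining
 * groups in order.
 */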

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct m_buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return (blkno);
}
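
/*
 * Fragment-carving example (illustrative): with fs_frag == 8, a
 * 2-fragment request that finds no suitable run in cg_frsum breaks
 * up a full block: the first 2 fragments are kept and the remaining
 * 6 go back to the free map, so cs_nffree grows by 6 and cg_frsum[6]
 * gains an entry.  When a run of exactly `allocsiz' free fragments
 * is found instead, the leading `frags' fragments are taken and the
 * leftover run of allocsiz - frags is re-counted in cg_frsum.
 */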

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct m_buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
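
/*
 * Rotor note (descriptive): when the caller's preference falls
 * outside this cylinder group, the search starts at cg_rotor, where
 * the previous search in this group stopped, and the block found is
 * recorded back into cg_rotor.  Successive unpreferred allocations
 * therefore sweep the group instead of rescanning it from the front.
 */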

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct m_buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
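
/*
 * Reassembly example (illustrative): with fs_frag == 8, freeing the
 * last 2 fragments of a block whose other 6 fragments are already
 * free leaves all 8 contiguous, so the code above converts the 8
 * fragment credits into one full block: cs_nffree drops by fs_frag
 * and cs_nbfree gains one.
 */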
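
/*
 * Return the number of bytes remaining in cp[0..size) counted from
 * the first byte whose table[] entry has a bit of `mask' set, or 0
 * if no byte matches.  This is the historical 4.4BSD scanc(),
 * reimplemented here for the fragment-table search below.
 */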
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
				ostart, olen,
				ufs_rw32(cgp->cg_freeoff, needswap),
				(long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
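
/*
 * Search-mask example (illustrative, for the common fs_frag == 8
 * layout): the mask handed to scanc() reduces to 1 << (allocsiz - 1),
 * so the fragtbl[] lookup stops at the first map byte containing a
 * free run of exactly `allocsiz' fragments; the callers arrange, via
 * cg_frsum or the free block counts, that such a run exists.  The
 * around[]/inside[] pass then pins down the run's position within
 * that block.
 */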

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
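
/*
 * Accounting example (illustrative): freeing block b when the two
 * blocks before it and the one block after it are already free joins
 * a 2-cluster and a 1-cluster into a single 4-cluster, so the code
 * above does sump[4]++, sump[2]-- and sump[1]-- (with the cluster
 * length capped at fs_contigsumsize), then recomputes the
 * fs_maxcluster hint for this cylinder group.
 */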
678