/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>
#include <stdint.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
		     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
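		/*
		 * Charge the allocation to the inode; i_blocks is kept
		 * in DEV_BSIZE (512-byte) units, hence the conversion.
		 */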
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next are physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
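	/*
	 * The doubling step size probes the groups at offsets 1, 3, 7,
	 * 15, ... (2^k - 1) from the starting group, wrapping around
	 * fs_ncg as needed.
	 */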
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * Check to see if any fragments are already available.
	 * allocsiz is the smallest free run size that can satisfy the
	 * request; if it exceeds the request, the leftover frags are
	 * hacked back down into a smaller free run.
	 */
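	/*
	 * cg_frsum[n] counts the free runs of exactly n frags in this
	 * cylinder group, so the first nonzero entry at or above
	 * `frags' identifies the smallest run that fits.
	 */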
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
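	/*
	 * cg_rotor remembers where the previous block search left off,
	 * so successive allocations sweep the group rather than
	 * rescanning it from the front each time.
	 */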
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
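		/*
		 * blkmap() extracts the free-map bits for the block
		 * containing these frags; ffs_fragacct_swap() then
		 * removes the old run sizes from cg_frsum before the
		 * frags are marked free.
		 */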
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}


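/*
 * Scan the region cp[0..size-1] for the first byte whose table entry
 * has any bit of `mask' set; return the count of bytes from that byte
 * through the end of the region, or zero if no byte matches.  This is
 * a userland stand-in for the historical kernel scanc() routine.
 */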
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
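	/*
	 * fragtbl[fs_frag] maps each possible free-map byte to a bit
	 * set recording which free-fragment run sizes occur within it;
	 * the mask handed to scanc() picks out bytes containing a run
	 * of the wanted size.
	 */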
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
				ostart, olen,
				ufs_rw32(cgp->cg_freeoff, needswap),
				(long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
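	/*
	 * around[allocsiz] masks a window of allocsiz frags plus its
	 * bounding bits, and inside[allocsiz] is the matching pattern
	 * for a free run of that size; shifting both across the byte
	 * locates the exact frag offset.
	 */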
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Mark the block itself free or allocated in the cluster map.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
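	/*
	 * sump[n] counts free clusters of exactly n blocks (capped at
	 * fs_contigsumsize).  Freeing this block merges the forward
	 * and back runs into a single run of back + forw + 1 blocks,
	 * so the two old runs are uncounted and the merged run is
	 * counted; cnt's sign reverses the adjustment on allocation.
	 */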
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}