/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>
#include <stdint.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct m_buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, u_int, daddr_t, int,
		     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
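
/*
 * Typical call pattern, as an illustrative sketch only (the real
 * callers live in makefs' block-allocation path and may pass a
 * different preference or size):
 *
 *	daddr_t bno;
 *	int error;
 *
 *	error = ffs_alloc(ip, lbn,
 *	    ffs_blkpref_ufs1(ip, lbn, indx, bap), fs->fs_bsize, &bno);
 *	if (error)
 *		errx(1, "ffs_alloc: no space");
 */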

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
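
/*
 * Worked example for the routine above (illustrative numbers): with
 * fs_maxbpg = 2048, block index 2048 starts a new section, so the rotor
 * sweep picks the first cylinder group at or after startcg holding at
 * least the file-system-average number of free blocks.  Any other index
 * simply prefers the fragment right after its predecessor,
 * bap[indx - 1] + fs_frag.
 */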

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	u_int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
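	/*
	 * The loop below probes groups at cumulative offsets 1, 3, 7,
	 * 15, ... from the starting group (i doubles on each pass and
	 * is added to cg), wrapping modulo fs_ncg.  Starting at cg 5
	 * with 32 groups, for example, the probe order is 6, 8, 12, 20,
	 * then 4 (36 mod 32).
	 */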
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct m_buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
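	/*
	 * Illustrative example: on a file system with 8K blocks and 1K
	 * fragments, a 2K request needs frags = 2.  If no free run of
	 * exactly 2 fragments exists (cg_frsum[2] == 0) but a run of 3
	 * does, allocsiz becomes 3 and the unused tail fragment is
	 * credited back to cg_frsum[allocsiz - frags] further below.
	 */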
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct m_buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct m_buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
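		/*
		 * For example, when this call frees the last missing
		 * fragment of a block, the check below sees fs_frag
		 * contiguous free fragments: nffree is reduced by
		 * fs_frag and nbfree is credited with one full block.
		 */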
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}


static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}
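
/*
 * scanc() returns the number of bytes remaining in the range counted
 * from the first byte whose table entry matches the mask, or 0 when no
 * byte matches.  For example, with size == 4 and a match at index 1 it
 * returns 3; ffs_mapsearch() below converts that remainder back into
 * the matching byte's offset as (start + len - loc).
 */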

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
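	/*
	 * The search is two-pass: first from the preferred byte (or from
	 * the fragment rotor, cg_frotor) to the end of the map, then
	 * wrapping around from byte 0.  fragtbl[fs->fs_frag] classifies
	 * each free-map byte, and the mask handed to scanc() selects
	 * bytes containing a run of free fragments able to satisfy
	 * allocsiz.
	 */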
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
				ostart, olen,
				ufs_rw32(cgp->cg_freeoff, needswap),
				(long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
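	/*
	 * Worked example (freeing, cnt == 1): if the freed block joins
	 * back == 1 free block behind it and forw == 2 ahead of it, the
	 * new cluster spans 4 blocks, so sump[4] gains one entry while
	 * the old sump[1] and sump[2] runs each lose one.
	 */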
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}