/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>
#include <stdint.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct m_buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, u_int, daddr_t, int,
                    daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
        struct fs *fs = ip->i_fs;
        daddr_t bno;
        int cg;

        *bnp = 0;
        if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
                errx(1, "ffs_alloc: bad size: bsize %d size %d",
                    fs->fs_bsize, size);
        }
        if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
                goto nospace;
        if (bpref >= fs->fs_size)
                bpref = 0;
        if (bpref == 0)
                cg = ino_to_cg(fs, ip->i_number);
        else
                cg = dtog(fs, bpref);
        bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
        if (bno > 0) {
                if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
                        ip->i_ffs1_blocks += size / DEV_BSIZE;
                else
                        ip->i_ffs2_blocks += size / DEV_BSIZE;
                *bnp = bno;
                return (0);
        }
nospace:
        return (ENOSPC);
}
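
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original source; the locals below are hypothetical).  A caller that
 * needs one new full block would pass a layout hint obtained from
 * ffs_blkpref_ufs1()/ffs_blkpref_ufs2() below and treat a nonzero
 * return as ENOSPC:
 *
 *	daddr_t newblk;
 *
 *	if (ffs_alloc(ip, lbn, bpref, ip->i_fs->fs_bsize, &newblk) != 0)
 *		errx(1, "out of space");
 *
 * The result is a file-system block number; it must be converted with
 * fsbtodb() before being handed to the device layer.
 */
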
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
        struct fs *fs;
        u_int cg, startcg;
        int avgbfree;

        fs = ip->i_fs;
        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < UFS_NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                }
                /*
                 * Find a cylinder group with a greater than average
                 * number of unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg =
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                else
                        startcg = dtog(fs,
                            ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
                for (cg = 0; cg <= startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
                return (0);
        }
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
        struct fs *fs;
        u_int cg, startcg;
        int avgbfree;

        fs = ip->i_fs;
        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < UFS_NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                }
                /*
                 * Find a cylinder group with a greater than average
                 * number of unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg =
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                else
                        startcg = dtog(fs,
                            ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                for (cg = 0; cg < startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                return (0);
        }
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
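
/*
 * Worked example for the preference math above (editorial addition,
 * hypothetical numbers).  With fs_fpg = 47360 fragments per cylinder
 * group and fs_frag = 8, a file whose inode lives in cg 5 and which is
 * starting a new section gets the preference
 *
 *	fs_fpg * cg + fs_frag = 47360 * 5 + 8 = 236808,
 *
 * i.e. the first full block past the start of cg 5.  Inside a section
 * the contiguous case simply returns the previous block's address plus
 * fs_frag, the immediately adjacent block.
 */
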
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
        struct fs *fs;
        daddr_t result;
        u_int i, icg = cg;

        fs = ip->i_fs;
        /*
         * 1: preferred cylinder group
         */
        result = (*allocator)(ip, cg, pref, size);
        if (result)
                return (result);
        /*
         * 2: quadratic rehash
         */
        for (i = 1; i < fs->fs_ncg; i *= 2) {
                cg += i;
                if (cg >= fs->fs_ncg)
                        cg -= fs->fs_ncg;
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
        }
        /*
         * 3: brute force search
         * Note that we start at i == 2, since 0 was checked initially,
         * and 1 is always checked in the quadratic rehash.
         */
        cg = (icg + 2) % fs->fs_ncg;
        for (i = 2; i < fs->fs_ncg; i++) {
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
                cg++;
                if (cg == fs->fs_ncg)
                        cg = 0;
        }
        return (0);
}
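
/*
 * Worked example of the search order above (editorial addition,
 * hypothetical numbers).  With fs_ncg = 16 and a preferred cg of 5,
 * the quadratic rehash (i = 1, 2, 4, 8) probes cgs
 *
 *	5, 6, 8, 12, 4
 *
 * (12 + 8 = 20 wraps around to 4), and the brute-force pass then
 * starts at (5 + 2) % 16 = 7 and walks 7, 8, ..., 15, 0, ..., 4, so
 * every cylinder group is visited at least once before giving up.
 */
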
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
        struct cg *cgp;
        struct m_buf *bp;
        daddr_t bno, blkno;
        int error, frags, allocsiz, i;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
                return (0);
        error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, NULL, &bp);
        if (error) {
                return (0);
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic_swap(cgp, needswap) ||
            (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
                brelse(bp);
                return (0);
        }
        if (size == fs->fs_bsize) {
                bno = ffs_alloccgblk(ip, bp, bpref);
                bdwrite(bp);
                return (bno);
        }
        /*
         * check to see if any fragments are already available
         * allocsiz is the size which will be allocated, hacking
         * it down to a smaller size if necessary
         */
        frags = numfrags(fs, size);
        for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
                if (cgp->cg_frsum[allocsiz] != 0)
                        break;
        if (allocsiz == fs->fs_frag) {
                /*
                 * no fragments were available, so a block will be
                 * allocated, and hacked up
                 */
                if (cgp->cg_cs.cs_nbfree == 0) {
                        brelse(bp);
                        return (0);
                }
                bno = ffs_alloccgblk(ip, bp, bpref);
                bpref = dtogd(fs, bno);
                for (i = frags; i < fs->fs_frag; i++)
                        setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
                i = fs->fs_frag - frags;
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                fs->fs_fmod = 1;
                ufs_add32(cgp->cg_frsum[i], 1, needswap);
                bdwrite(bp);
                return (bno);
        }
        bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
        for (i = 0; i < frags; i++)
                clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
        fs->fs_cstotal.cs_nffree -= frags;
        fs->fs_cs(fs, cg).cs_nffree -= frags;
        fs->fs_fmod = 1;
        ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
        if (frags != allocsiz)
                ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
        blkno = cg * fs->fs_fpg + bno;
        bdwrite(bp);
        return blkno;
}
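
/*
 * Worked example of the cg_frsum bookkeeping above (editorial
 * addition, hypothetical numbers).  On an fs_frag = 8 file system, a
 * request for 3 fragments when the smallest available run has length
 * 5 (allocsiz = 5) takes the first 3 fragments of that run:
 *
 *	cg_frsum[5]--;		the 5-fragment run is consumed
 *	cg_frsum[5 - 3]++;	a 2-fragment run is left behind
 *
 * and cs_nffree drops by 3.  When no run is long enough, a whole
 * block is split instead, and cg_frsum[8 - 3] gains the leftover run.
 */
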
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct m_buf *bp, daddr_t bpref)
{
        struct cg *cgp;
        daddr_t blkno;
        int32_t bno;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);
        u_int8_t *blksfree_swap;

        cgp = (struct cg *)bp->b_data;
        blksfree_swap = cg_blksfree_swap(cgp, needswap);
        if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
                bpref = ufs_rw32(cgp->cg_rotor, needswap);
        } else {
                bpref = blknum(fs, bpref);
                bno = dtogd(fs, bpref);
                /*
                 * if the requested block is available, use it
                 */
                if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
                        goto gotit;
        }
        /*
         * Take the next available one in this cylinder group.
         */
        bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
        if (bno < 0)
                return (0);
        cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
        blkno = fragstoblks(fs, bno);
        ffs_clrblock(fs, blksfree_swap, (long)blkno);
        ffs_clusteracct(fs, cgp, blkno, -1);
        ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
        fs->fs_cstotal.cs_nbfree--;
        fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
        fs->fs_fmod = 1;
        blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
        return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
        struct cg *cgp;
        struct m_buf *bp;
        int32_t fragno, cgbno;
        int i, error, cg, blk, frags, bbase;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
            fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
                errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
                    (long long)bno, fs->fs_bsize, size);
        }
        cg = dtog(fs, bno);
        if (bno >= fs->fs_size) {
                warnx("bad block %lld, ino %ju", (long long)bno,
                    (uintmax_t)ip->i_number);
                return;
        }
        error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, NULL, &bp);
        if (error) {
                return;
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic_swap(cgp, needswap)) {
                brelse(bp);
                return;
        }
        cgbno = dtogd(fs, bno);
        if (size == fs->fs_bsize) {
                fragno = fragstoblks(fs, cgbno);
                if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
                        errx(1, "blkfree: freeing free block %lld",
                            (long long)bno);
                }
                ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
                ffs_clusteracct(fs, cgp, fragno, 1);
                ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                fs->fs_cstotal.cs_nbfree++;
                fs->fs_cs(fs, cg).cs_nbfree++;
        } else {
                bbase = cgbno - fragnum(fs, cgbno);
                /*
                 * decrement the counts associated with the old frags
                 */
                blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
                ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
                /*
                 * deallocate the fragment
                 */
                frags = numfrags(fs, size);
                for (i = 0; i < frags; i++) {
                        if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
                                errx(1, "blkfree: freeing free frag: block %lld",
                                    (long long)(cgbno + i));
                        }
                        setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
                }
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                /*
                 * add back in counts associated with the new frags
                 */
                blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
                ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
                /*
                 * if a complete block has been reassembled, account for it
                 */
                fragno = fragstoblks(fs, bbase);
                if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
                        ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
                        fs->fs_cstotal.cs_nffree -= fs->fs_frag;
                        fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
                        ffs_clusteracct(fs, cgp, fragno, 1);
                        ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                        fs->fs_cstotal.cs_nbfree++;
                        fs->fs_cs(fs, cg).cs_nbfree++;
                }
        }
        fs->fs_fmod = 1;
        bdwrite(bp);
}
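
/*
 * Worked example of the fragment reassembly above (editorial addition,
 * hypothetical numbers).  With fs_frag = 8, suppose fragments 0-2 and
 * 6-7 of a block are already free and fragments 3-5 are now released:
 * the setbit() calls make the whole block free, so the final
 * ffs_isblock() check fires and the counters move one block from the
 * fragment pool to the block pool:
 *
 *	cs_nffree += 3;		then cs_nffree -= 8;
 *	cs_nbfree += 1;
 *
 * leaving a net of five fewer free fragments and one more free block.
 */
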
/*
 * scanc() returns the number of bytes remaining in the string,
 * counting the first byte whose table entry has a bit of `mask' set,
 * or 0 if no byte matches.  It stands in for the kernel routine of
 * the same name.
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
        const u_char *end = &cp[size];

        while (cp < end && (table[*cp] & mask) == 0)
                cp++;
        return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
        int32_t bno;
        int start, len, loc, i;
        int blk, field, subfield, pos;
        int ostart, olen;
        const int needswap = UFS_FSNEEDSWAP(fs);

        /*
         * find the fragment by searching through the free block
         * map for an appropriate bit pattern
         */
        if (bpref)
                start = dtogd(fs, bpref) / NBBY;
        else
                start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
        len = howmany(fs->fs_fpg, NBBY) - start;
        ostart = start;
        olen = len;
        loc = scanc((u_int)len,
            (const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
            (const u_char *)fragtbl[fs->fs_frag],
            (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
        if (loc == 0) {
                len = start + 1;
                start = 0;
                loc = scanc((u_int)len,
                    (const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
                    (const u_char *)fragtbl[fs->fs_frag],
                    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
                if (loc == 0) {
                        errx(1,
                            "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
                            ostart, olen,
                            ufs_rw32(cgp->cg_freeoff, needswap),
                            (long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
                        /* NOTREACHED */
                }
        }
        bno = (start + len - loc) * NBBY;
        cgp->cg_frotor = ufs_rw32(bno, needswap);
        /*
         * found the byte in the map
         * sift through the bits to find the selected frag
         */
        for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
                blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
                blk <<= 1;
                field = around[allocsiz];
                subfield = inside[allocsiz];
                for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
                        if ((blk & field) == subfield)
                                return (bno + pos);
                        field <<= 1;
                        subfield <<= 1;
                }
        }
        errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
        return (-1);
}
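
/*
 * Sketch of how the map search above works (editorial addition).  Each
 * byte of the free map covers NBBY (8) fragments, and
 * fragtbl[fs_frag][byte] is a precomputed mask describing which free
 * run lengths occur in that byte.  scanc() therefore skips, a byte at
 * a time, every byte that cannot possibly hold a free run of allocsiz
 * fragments; only the first promising byte is searched bit by bit with
 * the around[]/inside[] masks.  For example, a map byte of 0x0f (four
 * adjacent fragments free, hypothetical layout) can satisfy requests
 * with allocsiz <= 4 but is skipped for allocsiz = 5.
 */
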
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
        int32_t *sump;
        int32_t *lp;
        u_char *freemapp, *mapp;
        int i, start, end, forw, back, map, bit;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_contigsumsize <= 0)
                return;
        freemapp = cg_clustersfree_swap(cgp, needswap);
        sump = cg_clustersum_swap(cgp, needswap);
        /*
         * Allocate or clear the actual block.
         */
        if (cnt > 0)
                setbit(freemapp, blkno);
        else
                clrbit(freemapp, blkno);
        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + fs->fs_contigsumsize;
        if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
                end = ufs_rw32(cgp->cg_nclusterblks, needswap);
        mapp = &freemapp[start / NBBY];
        map = *mapp++;
        bit = 1 << (start % NBBY);
        for (i = start; i < end; i++) {
                if ((map & bit) == 0)
                        break;
                if ((i & (NBBY - 1)) != (NBBY - 1)) {
                        bit <<= 1;
                } else {
                        map = *mapp++;
                        bit = 1;
                }
        }
        forw = i - start;
        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - fs->fs_contigsumsize;
        if (end < 0)
                end = -1;
        mapp = &freemapp[start / NBBY];
        map = *mapp--;
        bit = 1 << (start % NBBY);
        for (i = start; i > end; i--) {
                if ((map & bit) == 0)
                        break;
                if ((i & (NBBY - 1)) != 0) {
                        bit >>= 1;
                } else {
                        map = *mapp--;
                        bit = 1 << (NBBY - 1);
                }
        }
        back = start - i;
        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > fs->fs_contigsumsize)
                i = fs->fs_contigsumsize;
        ufs_add32(sump[i], cnt, needswap);
        if (back > 0)
                ufs_add32(sump[back], -cnt, needswap);
        if (forw > 0)
                ufs_add32(sump[forw], -cnt, needswap);

        /*
         * Update cluster summary information.
         */
        lp = &sump[fs->fs_contigsumsize];
        for (i = fs->fs_contigsumsize; i > 0; i--)
                if (ufs_rw32(*lp--, needswap) > 0)
                        break;
        fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
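
/*
 * Worked example for the cluster accounting above (editorial addition,
 * hypothetical numbers).  Freeing one block (cnt == 1) that lands
 * between a free run of back = 2 blocks and a free run of forw = 3
 * blocks merges the runs into one cluster of 2 + 3 + 1 = 6:
 *
 *	sump[6]++;		the merged cluster appears
 *	sump[2]--; sump[3]--;	the two old clusters disappear
 *
 * Allocation (cnt == -1) applies the same adjustments with the signs
 * reversed, splitting a cluster instead of merging one.
 */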