/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>
#include <stdint.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, u_int, daddr_t, int,
		    daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
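
/*
 * Illustrative numbers for ffs_alloc() above (not taken from any
 * particular image): with fs_fsize = 1024 and fs_bsize = 8192, the
 * 3 KB tail of a file arrives as size = 3072, i.e. three fragments;
 * on success the inode's block count grows by size / DEV_BSIZE = 6
 * sectors of DEV_BSIZE (512) bytes each.
 */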

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
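
/*
 * A sketch of the preference policy above, using made-up numbers: with
 * fs_maxbpg = 2048, every 2048th entry of a block-pointer array starts
 * a new section.  For such an entry the sweep begins at
 * dtog(bap[indx - 1] + 1), the group containing the fragment just past
 * the previous allocation, and settles on the first group holding at
 * least the file-system-wide average number of free blocks (avgbfree).
 * Inside a section the preference is simply the fragment after the
 * previous block, bap[indx - 1] + fs_frag.
 */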

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	u_int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
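
/*
 * A worked example of the rehash above, with made-up numbers: for
 * fs_ncg = 16 and a preferred group icg = 5, step 2 probes groups 6,
 * 8, 12, and 4 (offsets 1, 2, 4, 8 from the running position), after
 * which the brute-force pass starts at group (5 + 2) % 16 = 7 and
 * walks the remaining groups in order, wrapping at fs_ncg.
 */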

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		brelse(bp, 0);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp, 0);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp, 0);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return (blkno);
}
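
/*
 * The fragment-carving path above, with illustrative numbers: a
 * 3-fragment request in a group whose cg_frsum[] shows no free run of
 * 3..fs_frag-1 fragments carves up a whole block instead.  With
 * fs_frag = 8, fragments 3..7 of the new block go back into the free
 * map, the nffree counts rise by 5, and cg_frsum[5] gains the
 * leftover run.
 */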

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 ||
	    (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		brelse(bp, 0);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp, 0);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap),
		    fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1,
				    "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag,
			    needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
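
/*
 * The reassembly check above, with illustrative numbers: freeing the
 * last 2 busy fragments of an 8-fragment block first credits 2 to the
 * nffree counts; once ffs_isblock() then sees the whole block free,
 * all 8 fragments are backed out of nffree and one block is credited
 * to nbfree and to the cluster map instead.
 */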

/*
 * Scan the bytes cp[0..size-1] and stop at the first one whose table
 * entry has any of the bits in mask set; return the number of bytes
 * remaining in the region (0 if no byte matched).
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
	    (const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
	    (const u_char *)fragtbl[fs->fs_frag],
	    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
		    (const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
		    (const u_char *)fragtbl[fs->fs_frag],
		    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
			    "ffs_alloccg: map corrupted: start %d "
			    "len %d offset %d %ld",
			    ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
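
/*
 * A note on the two scanc() passes above: fragtbl[fs->fs_frag] (from
 * ffs_tables.c) condenses each possible free-map byte into a mask of
 * the fragment-run sizes it contains, letting the search skip a whole
 * byte at a time; around[] and inside[] then locate the run within the
 * matching byte.  cg_frotor is a rotor so that successive searches
 * resume where the previous one stopped.
 */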

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
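
/*
 * A worked example of the accounting above, with made-up numbers:
 * freeing a block (cnt == 1) that has back = 1 free block behind it
 * and forw = 2 free blocks ahead merges three clusters into one of
 * length 4, so sump[4] is incremented (the index is capped at
 * fs_contigsumsize) while sump[1] and sump[2] each lose the cluster
 * they used to describe.
 */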