/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.8 (Berkeley) 2/21/94
 * $Id: ffs_alloc.c,v 1.16 1995/08/25 19:40:26 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>	/* YF - needed for ufs_getlbns() */

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern u_long nextgennumber;

static daddr_t	ffs_alloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_alloccgblk __P((struct fs *, struct cg *, daddr_t));
static daddr_t	ffs_clusteralloc __P((struct inode *, int, daddr_t, int));
static ino_t	ffs_dirpref __P((struct fs *));
static daddr_t	ffs_fragextend __P((struct inode *, int, long, int, int));
static void	ffs_fserr __P((struct fs *, u_int, char *));
static u_long	ffs_hashalloc
		    __P((struct inode *, int, long, int, u_long (*)()));
static ino_t	ffs_nodealloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));

void	ffs_clusteracct __P((struct fs *, struct cg *, daddr_t, int));
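/*
 * Editorial note, not from the original source: the per-cylinder-group
 * allocators dispatched through ffs_hashalloc() -- ffs_alloccg(),
 * ffs_nodealloccg() and ffs_clusteralloc() -- deliberately share the
 * argument shape (ip, cg, pref, size), which is why ffs_hashalloc() is
 * declared above with an untyped u_long (*)() function pointer and the
 * callers cast its result back to daddr_t or ino_t.
 */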
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	daddr_t *bnp;
{
	register struct fs *fs;
	daddr_t bno;
	int cg;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%lx, bsize = %ld, size = %d, fs = %s\n",
		    (u_long)ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	error = chkdq(ip, (long)btodb(size), cred, 0);
	if (error)
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_long (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
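/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller allocating one full block for logical block `lbn', seeding the
 * preference from ffs_blkpref().  The surrounding names (ip, fs, cred)
 * are assumptions about the caller's context.
 *
 *	daddr_t newb;
 *	int error;
 *
 *	error = ffs_alloc(ip, lbn,
 *	    ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
 *	    fs->fs_bsize, cred, &newb);
 *	if (error)
 *		return (error);
 */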
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	daddr_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%lx, bsize = %ld, osize = %d, "
		    "nsize = %d, fs = %s\n",
		    (u_long)ip->i_dev, fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%lx, bsize = %ld, bprev = %ld, fs = %s\n",
		    (u_long) ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, (long)btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused
		 * portion. If the file continues to grow, the
		 * `ffs_fragextend' call above will be able to grow it in
		 * place without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%lx, optim = %ld, fs = %s\n",
		    (u_long)ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
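/*
 * Worked example of the optimization thresholds above, using assumed
 * values rather than anything from the original source: with fs_dsize =
 * 100000 fragments and fs_minfree = 8%, the free reserve is 8000
 * fragments.  Under FS_OPTSPACE the policy flips to FS_OPTTIME once
 * cs_nffree exceeds 100000 * 8 / (2 * 100) = 4000 fragments (half the
 * reserve); under FS_OPTTIME it flips back to FS_OPTSPACE once
 * cs_nffree drops below 100000 * (8 - 2) / 100 = 6000 fragments
 * (within 2% of the reserve).
 */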
/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
#include <sys/sysctl.h>
int doasyncfree = 1;
#ifdef DEBUG
struct ctldebug debug14 = { "doasyncfree", &doasyncfree };
#endif
int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	daddr_t start_lbn, end_lbn, soff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;
	struct timeval tv;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-cluster");
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, (u_long (*)())ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize)
			bap = ebap;
#ifdef DIAGNOSTIC
		if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
			panic("ffs_reallocblks: alloc mismatch");
#endif
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree) {
			tv = time;
			VOP_UPDATE(vp, &tv, &tv, 1);
		}
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		ffs_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
	}
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}
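/*
 * Illustrative walk-through with assumed values (not from the original
 * source): for fs_frag = 8 and a four-buffer cluster (len = 4) moved to
 * newblk = 1024, the two loops above assign fragment addresses 1024,
 * 1032, 1040 and 1048 to the cluster's logical blocks, free each old
 * location with ffs_blkfree(), and point each buffer's b_blkno at
 * fsbtodb(fs, 1024 + 8 * i).
 */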
/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 * 1) allocate the preferred inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
int
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %ld, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {	/* XXX */
		printf("free inode %s/%ld had %ld blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
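/*
 * Worked example with assumed values (not from the original source):
 * for fs_ncg = 4 with per-group cs_nifree of {50, 200, 120, 30},
 * avgifree = 400 / 4 = 100, so only groups 1 and 2 qualify.  If their
 * cs_ndir counts are 40 and 25, ffs_dirpref() selects group 2 and
 * returns its first inode number, fs_ipg * 2.
 */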
/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig ||
	    bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	/*
	 * Here we convert ms of delay to frags as:
	 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
	 *		  ((sect/frag) * (ms/sec))
	 * then round up to the next block.
	 */
	nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
	    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
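/*
 * Worked example of the delay-to-fragments conversion above, using
 * assumed geometry (none of these values come from the original
 * source): with fs_rotdelay = 4 ms, fs_rps = 60 rev/sec, fs_nsect = 32
 * sectors/rev and NSPF(fs) = 2 sectors/frag, the head passes
 * 4 * 60 * 32 / (2 * 1000) = 3 fragments in the delay window (after
 * integer truncation), and roundup(3, fs_frag = 8) pushes nextblk one
 * full block further out.
 */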
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (bprev);
}
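/*
 * Worked example with assumed values (not from the original source):
 * extending osize = 2048 to nsize = 4096 with fs_fsize = 1024 grows the
 * piece from 2 to 4 fragments.  The first loop above verifies that
 * fragments bno + 2 and bno + 3 are still free; the frsum bookkeeping
 * then moves the count from the old free-run size to the shortened
 * remainder (if any), and the final loop claims the two new fragments
 * and charges them against the free-fragment totals.
 */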
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
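/*
 * Worked example with assumed values (not from the original source):
 * a request for size = 2048 with fs_bsize = 8192 and fs_fsize = 1024
 * needs frags = 2.  If cg_frsum[2] through cg_frsum[4] are zero but
 * cg_frsum[5] is not, allocsiz settles on 5; ffs_mapsearch() locates a
 * 5-fragment run, the first 2 fragments are taken, and the bookkeeping
 * converts the run into a free 3-fragment piece (cg_frsum[5]--,
 * cg_frsum[3]++).  If no run at all is free, a full block is split the
 * same way, leaving a 6-fragment piece behind.
 */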
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block; panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
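/*
 * Illustrative note with assumed values (not from the original source):
 * with fs_nrpos = 8 rotational positions, a preference at position 5
 * makes the summary scan above check cylinder slots 5..7 and then 0..4;
 * if slot 6 is the first with a nonzero count, the fs_postbl entry for
 * position 6 gives a starting block and fs_rotbl chains through the
 * candidate blocks at that position until ffs_isblock() finds one that
 * is actually free in cg_blksfree.
 */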
/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, run, bno, bit, map;
	u_char *mapp;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (cg_clustersum(cgp)[i] > 0)
			break;
	if (i > fs->fs_contigsumsize)
		goto fail;
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, i = bpref; i < cgp->cg_nclusterblks; i++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (i == cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	bno = cg * fs->fs_fpg + blkstofrags(fs, i - run + 1);
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(fs, cgp, bno + i) != bno + i)
			panic("ffs_clusteralloc: lost block");
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %ld, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
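/*
 * Worked example of the bitmap scan above, with assumed values (not
 * from the original source): skpc() skips map bytes that are 0xff and
 * returns how much of the range is left, so i = start + len - loc is
 * the first byte with a free inode.  If that byte is 0xe7 (binary
 * 11100111), the bit loop tests 0x01, 0x02, 0x04 and finds 0x08 clear,
 * so the inode at byte offset i * NBBY + 3 is taken and recorded as the
 * new cg_irotor.
 */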
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	daddr_t blkno;
	int i, error, cg, blk, frags, bbase;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%lx, bsize = %ld, size = %ld, fs = %s\n",
		    (u_long)ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %ld, ino %ld\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%lx, block = %ld, fs = %s\n",
			    (u_long) ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%lx, block = %ld, fs = %s\n",
				    (u_long) ip->i_dev, bno + i,
				    fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
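/*
 * Worked example with assumed values (not from the original source):
 * freeing a 2048-byte fragment with fs_fsize = 1024 returns two
 * fragments to the map.  If those two fragments complete a run of
 * fs_frag = 8 free fragments, the reassembly case above withdraws all
 * eight from the fragment counts, credits one free block instead, and
 * lets ffs_clusteracct() extend the cluster map accordingly.
 */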
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ffs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	ino_t ino = ap->a_ino;
	struct buf *bp;
	int error, cg;

	pip = VTOI(ap->a_pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%lx, ino = %ld, fs = %s\n",
		    (u_long)pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((ap->a_mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block and none are
 * available.
 */
static daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}
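/*
 * Illustrative note with assumed values (not from the original source):
 * for fs_frag = 8 and allocsiz = 3, scanc() consults fragtbl[8] to find
 * the first map byte whose free-fragment runs include one of at least
 * three fragments; the sifting loop then slides the 3-fragment
 * `inside'/`around' patterns across that block's bits and returns the
 * exact starting fragment number of the run.
 */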
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	daddr_t blkno;
	int cnt;
{
	long *sump;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}
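/*
 * Example of the log line produced by ffs_fserr(), built from the
 * format string above with hypothetical values:
 *
 *	uid 1012 on /usr: file system full
 */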