/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.8 (Berkeley) 2/21/94
 * $Id$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern u_long nextgennumber;

static daddr_t	ffs_alloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_alloccgblk __P((struct fs *, struct cg *, daddr_t));
static daddr_t	ffs_clusteralloc __P((struct inode *, int, daddr_t, int));
static ino_t	ffs_dirpref __P((struct fs *));
static daddr_t	ffs_fragextend __P((struct inode *, int, long, int, int));
static void	ffs_fserr __P((struct fs *, u_int, char *));
static u_long	ffs_hashalloc
		    __P((struct inode *, int, long, int, u_long (*)()));
static ino_t	ffs_nodealloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));

void	ffs_clusteracct __P((struct fs *, struct cg *, daddr_t, int));

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified.  If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	daddr_t *bnp;
{
	register struct fs *fs;
	daddr_t bno;
	int cg, error;

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), cred, 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_long (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
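
/*
 * Illustrative sketch (not part of the original source): the non-root
 * reserve test above refuses an allocation once free space falls to the
 * fs_minfree reserve.  A minimal user-level model of that check, with
 * hypothetical counts pulled from the superblock summary (nbfree is
 * assumed to be already converted to frags):
 */
#ifdef notdef
static int
would_hit_reserve(long nbfree_frags, long nffree, long dsize, int minfree)
{
	/* free frags remaining, less the reserved percentage of fs_dsize */
	return (nbfree_frags + nffree - dsize * minfree / 100 <= 0);
}
#endif /* notdef */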

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified.  The allocator attempts to extend
 * the original block.  Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	daddr_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment.  Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow.  If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment.  To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying.  If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		(void) vnode_pager_uncache(ITOV(ip));
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
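
/*
 * Illustrative sketch (not part of the original source): the two
 * hysteresis thresholds used in the switch above, pulled out as a
 * predicate.  With a hypothetical fs_dsize of 100000 frags and
 * fs_minfree of 10%, SPACE flips to TIME once more than 5000 frags sit
 * in fragments, and TIME flips back to SPACE once fragmented space
 * reaches 8000 frags (within 2% of the 10% reserve).
 */
#ifdef notdef
static int
optim_should_flip(struct fs *fs)
{
	if (fs->fs_optim == FS_OPTSPACE)
		return (fs->fs_minfree >= 5 &&
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100));
	return (fs->fs_cstotal.cs_nffree >=
	    fs->fs_dsize * (fs->fs_minfree - 2) / 100);
}
#endif /* notdef */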

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given.  The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range.  If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation.  If unsuccessful, the allocation
 * is left unchanged.  Success or failure of the reallocation is returned.
 * Note that the error return is not reflected back to the user.  Rather
 * the previous block allocation will be used.
 */
#include <sys/sysctl.h>
int doasyncfree = 1;
#ifdef DEBUG
struct ctldebug debug14 = { "doasyncfree", &doasyncfree };
#endif
int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-cluster");
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, (u_long (*)())ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize)
			bap = ebap;
#ifdef DIAGNOSTIC
		if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
			panic("ffs_reallocblks: alloc mismatch");
#endif
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk.  In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written.  The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			VOP_UPDATE(vp, &time, &time, MNT_WAIT);
	}
	if (ssize < len)
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		ffs_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
	}
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}
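
/*
 * Illustrative sketch (not part of the original source): the
 * pointer-rewrite loop above hands out the new cluster in fs_frag-sized
 * strides, since block pointers count fragments.  For a hypothetical
 * cluster starting at frag address 1000 with 8 frags per block, logical
 * blocks 0..3 of the range map to frags 1000, 1008, 1016 and 1024:
 */
#ifdef notdef
static void
show_cluster_layout(daddr_t newblk, int frag, int len)
{
	int i;

	for (i = 0; i < len; i++)
		printf("child %d -> frag %d\n", i, newblk + i * frag);
}
#endif /* notdef */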

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
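
/*
 * Illustrative sketch (not part of the original source): the selection
 * rule above, run over a toy per-cg summary.  With three cgs holding
 * (ndir, nifree) = (10, 50), (4, 20) and (2, 40), the average nifree is
 * 36, so cg 1 is skipped as below average and cg 2 wins as the
 * least-directories group among the remainder:
 */
#ifdef notdef
static int
toy_dirpref(int *ndir, int *nifree, int ncg, int ipg)
{
	int cg, mincg = 0, minndir = ipg, avgifree = 0;

	for (cg = 0; cg < ncg; cg++)
		avgifree += nifree[cg];
	avgifree /= ncg;
	for (cg = 0; cg < ncg; cg++)
		if (ndir[cg] < minndir && nifree[cg] >= avgifree) {
			mincg = cg;
			minndir = ndir[cg];
		}
	return (mincg);
}
#endif /* notdef */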

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections.  The first section is composed of the
 * direct blocks.  Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file.  If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups.  When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made.  The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found.  If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out.  If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	    ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
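
/*
 * Illustrative arithmetic (not part of the original source) for the
 * rotdelay conversion above, using made-up geometry: fs_rotdelay = 4ms,
 * fs_rps = 60 rev/sec, fs_nsect = 32 sect/track, NSPF(fs) = 2 sect/frag
 * and fs_frag = 8.  Then 4 * 60 * 32 / (2 * 1000) = 3 frags of head
 * travel, and roundup(3, 8) = 8, so the next allocation is requested
 * one full block further along.
 */
#ifdef notdef
static daddr_t
rotdelay_frags(int rotdelay, int rps, int nsect, int nspf, int frag)
{
	return (roundup(rotdelay * rps * nsect / (nspf * 1000), frag));
}
#endif /* notdef */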

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (bprev);
}
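
/*
 * Illustrative sketch (not part of the original source): the cylinder
 * group probe order of ffs_hashalloc above, for a hypothetical fs_ncg
 * of 8 and preferred cg 3.  Pass 1 tries cg 3; the quadratic rehash
 * tries cgs 4, 6 and 2 (offsets 1, 2, 4 with wraparound); the brute
 * force pass sweeps the remaining cgs 5, 6, 7, 0, 1, 2.
 */
#ifdef notdef
static void
show_probe_order(int icg, int ncg)
{
	int i, cg = icg;

	printf("%d", cg);			/* 1: preferred cg */
	for (i = 1; i < ncg; i *= 2) {		/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		printf(" %d", cg);
	}
	cg = (icg + 2) % ncg;			/* 3: brute force sweep */
	for (i = 2; i < ncg; i++) {
		printf(" %d", cg);
		if (++cg == ncg)
			cg = 0;
	}
	printf("\n");
}
#endif /* notdef */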

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
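
/*
 * Illustrative accounting (not part of the original source): with 8
 * frags per block, a 3-frag request that finds no suitable run in
 * cg_frsum splits a whole block above: frags 0-2 of the new block are
 * handed out, frags 3-7 return to the free map, nffree rises by 5, and
 * cg_frsum[5] gains the leftover run.  A later 3-frag request can be
 * carved from that run by ffs_mapsearch, moving the remainder to
 * cg_frsum[2].
 */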

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated.  Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  It is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
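
/*
 * Illustrative sketch (not part of the original source): each block
 * allocated or freed must be accounted at three levels -- the cg's own
 * cg_cs, the filesystem-wide fs_cstotal, and the in-core fs_cs summary
 * -- as done inline at the `gotit' label above.  A hypothetical helper
 * making that triple update explicit:
 */
#ifdef notdef
static void
cg_nbfree_adjust(struct fs *fs, struct cg *cgp, int cnt)
{
	cgp->cg_cs.cs_nbfree += cnt;			/* per-cg, on disk */
	fs->fs_cstotal.cs_nbfree += cnt;		/* filesystem-wide */
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree += cnt;	/* in-core summary */
}
#endif /* notdef */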

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group.  Instead we just
 * take the first one that we find following bpref.
 */
static daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, run, bno, bit, map;
	u_char *mapp;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (cg_clustersum(cgp)[i] > 0)
			break;
	if (i > fs->fs_contigsumsize)
		goto fail;
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation.  We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read).  We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, i = bpref; i < cgp->cg_nclusterblks; i++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (i == cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	bno = cg * fs->fs_fpg + blkstofrags(fs, i - run + 1);
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(fs, cgp, bno + i) != bno + i)
			panic("ffs_clusteralloc: lost block");
	brelse(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
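
/*
 * Illustrative sketch (not part of the original source): the run scan
 * above, reduced to counting consecutive set bits in a byte map.  For
 * a map byte of 0xd8 (bit 0 = first block, so blocks 3, 4, 6 and 7
 * free) and len = 2, the counter resets on blocks 0-2, counts blocks
 * 3 and 4, and returns block 3 as the start of the run:
 */
#ifdef notdef
static int
find_run(u_char *map, int nblks, int len)
{
	int i, run = 0;

	for (i = 0; i < nblks; i++) {
		if ((map[i / NBBY] & (1 << (i % NBBY))) == 0)
			run = 0;
		else if (++run == len)
			return (i - run + 1);	/* first block of the run */
	}
	return (-1);
}
#endif /* notdef */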

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	daddr_t blkno;
	int i, error, cg, blk, frags, bbase;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
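
/*
 * Illustrative accounting (not part of the original source): with 8
 * frags per block, freeing the last 3 missing frags of a block first
 * raises nffree by 3; the reassembly check above then sees a fully
 * free block, so nffree drops by 8 and nbfree rises by 1.  Net effect:
 * the block's 8 frags move from fragment accounting to whole-block
 * accounting.
 */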

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ffs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	ino_t ino = ap->a_ino;
	struct buf *bp;
	int error, cg;

	pip = VTOI(ap->a_pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((ap->a_mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}
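
/*
 * Illustrative sketch (not part of the original source): what the
 * fragtbl/around/inside machinery above computes, written as a naive
 * scan.  It looks inside each block for allocsiz consecutive free bits
 * that do not cross a block boundary; the real code gets the same
 * answer from precomputed tables in far fewer memory references.
 */
#ifdef notdef
static int
naive_mapsearch(u_char *freemap, int nfrags, int frag, int allocsiz)
{
	int base, pos, i;

	for (base = 0; base + frag <= nfrags; base += frag)
		for (pos = 0; pos <= frag - allocsiz; pos++) {
			for (i = 0; i < allocsiz; i++)
				if (isclr(freemap, base + pos + i))
					break;
			if (i == allocsiz)
				return (base + pos);
		}
	return (-1);
}
#endif /* notdef */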

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	daddr_t blkno;
	int cnt;
{
	long *sump;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}