/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 * $FreeBSD$
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

typedef ufs_daddr_t allocfcn_t __P((struct inode *ip, int cg, ufs_daddr_t bpref,
				    int size));

static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t
	      ffs_alloccgblk __P((struct inode *, struct buf *, ufs_daddr_t));
#ifdef DIAGNOSTIC
static int	ffs_checkblk __P((struct inode *, ufs_daddr_t, long));
#endif
static void	ffs_clusteracct	__P((struct fs *, struct cg *, ufs_daddr_t,
				     int));
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t,
					 int));
static ino_t	ffs_dirpref __P((struct fs *));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void	ffs_fserr __P((struct fs *, u_int, char *));
static u_long	ffs_hashalloc
		__P((struct inode *, int, long, int, allocfcn_t *));
static ino_t	ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t,
				      int));

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	ufs_daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	ufs_daddr_t *bnp;
{
	register struct fs *fs;
	ufs_daddr_t bno;
	int cg;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
#ifdef QUOTA
	error = chkdq(ip, (long)btodb(size), cred, 0);
	if (error)
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
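 *
 * For example (illustrative numbers): with fs_bsize 8192 and fs_fsize
 * 1024, growing a file's last piece from two fragments (osize 2048) to
 * three (nsize 3072) first asks ffs_fragextend whether the fragment
 * following the old piece within the same block is still free; only if
 * it is not is a new, larger piece allocated elsewhere and the buffer
 * repointed so its contents land at the new location.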
 */
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	ufs_daddr_t lbprev;
	ufs_daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = %s, bsize = %ld, bprev = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (long)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, (long)btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
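		 *
		 * For example (illustrative numbers): with the
		 * traditional fs_minfree of 8% and an fs_dsize of
		 * 100000 fragments, the switch back to SPACE happens
		 * once 6000 or more fragments, i.e. (8 - 2)% of the
		 * data blocks, are tied up in partial blocks.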
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(ITOV(ip)))
			ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
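 *
 * The code below proceeds in three steps: find a suitable cluster
 * through ffs_hashalloc/ffs_clusteralloc, rewrite the block pointers
 * in the inode or in the indirect blocks, and finally free the old
 * blocks and point the buffers at the new locations.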
 */
static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, FFS_ASYNCFREE, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, FFS_REALLOCBLKS, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs_daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
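	 * (Each pass of the pointer-rewriting loop below advances
	 * blkno by fs_frag, since block addresses are carried in
	 * fragment units.)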
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
		    start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			UFS_UPDATE(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ip,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	ino_t ino, ipref;
	int cg, error;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode,
	    (allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, vpp);
	if (error) {
		UFS_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = random() / 2 + 1;
	return (0);
noinodes:
	ffs_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found.  If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_daddr_t lbn;
	int indx;
	ufs_daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig ||
	    bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	/*
	 * Here we convert ms of delay to frags as:
	 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
	 *	    ((sect/frag) * (ms/sec))
	 * then round up to the next block.
	 */
	nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
	    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
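 *
 * For example (illustrative numbers): with icg = 3 and fs_ncg = 16,
 * the quadratic rehash probes groups 4, 6, 10 and 2 (offsets 1, 2, 4
 * and 8 from the starting group), after which the brute force pass
 * sweeps the remaining groups starting at (icg + 2) % fs_ncg.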
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	allocfcn_t *allocator;
{
	register struct fs *fs;
	long result;	/* XXX why not same type as we return? */
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i)) {
			brelse(bp);
			return (0);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, bprev);
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
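 *
 * The cg_frsum array in the cylinder group counts the free pieces of
 * each fragment size, so a request for, say, three fragments scans
 * cg_frsum[3] and up for the smallest piece that fits and returns any
 * left-over fragments to the free counts.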
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	ufs_daddr_t bno, blkno;
	int allocsiz, error, frags;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(blksfree, bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (0);
	}
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	blkno = cg * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	bdwrite(bp);
	return ((u_long)blkno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
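 *
 * When rotational layout information is available, fs_postbl gives,
 * for each rotational position, the starting slot in the rotational
 * layout table, and fs_rotbl chains each slot to the next candidate at
 * the same position; the search below walks that chain until it finds
 * a free block.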
 */
static ufs_daddr_t
ffs_alloccgblk(ip, bp, bpref)
	struct inode *ip;
	struct buf *bp;
	ufs_daddr_t bpref;
{
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, blksfree, bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	blkno = cgp->cg_cgx * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	return (blkno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
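 *
 * cg_clustersum[i] counts the free runs of i contiguous blocks in this
 * cylinder group (runs longer than fs_contigsumsize all land in the
 * top bucket), so a request can be refused without scanning the map
 * when every entry from len up through fs_contigsumsize is zero; the
 * fs_maxcluster[] cache remembers that verdict for later calls.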
 */
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
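 *
 * The search below uses skpc() to skip over bytes of the inode bitmap
 * that are entirely allocated (0xff), starting at the byte containing
 * cg_irotor and wrapping around once, and then walks the bits of the
 * first byte that still has a zero bit.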
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	ufs_daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	u_int8_t *inosused;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(inosused, ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &inosused[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &inosused[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %ld, fs = %s\n",
			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = inosused[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
	setbit(inosused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
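 *
 * For example, freeing fragments adjacent to an existing free run
 * first backs the old run out of cg_frsum, sets the freed bits, and
 * then counts the merged run back in; if the merge completes a whole
 * block, the fragment totals are rolled back and the block is
 * credited to cs_nbfree instead.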
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	VOP_FREEBLKS(ip->i_devvp, fsbtodb(fs, bno), size);
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		printf("dev=%s, bno = %ld, bsize = %ld, size = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)bno, (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %ld, ino %lu\n",
		    (long)bno, (u_long)ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	bno = dtogd(fs, bno);
	blksfree = cg_blksfree(cgp);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (!ffs_isfreeblock(fs, blksfree, blkno)) {
			printf("dev = %s, block = %ld, fs = %s\n",
			    devtoname(ip->i_dev), (long)bno, fs->fs_fsmnt);
			panic("ffs_blkfree: freeing free block");
		}
		ffs_setblock(fs, blksfree, blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(blksfree, bno + i)) {
				printf("dev = %s, block = %ld, fs = %s\n",
				    devtoname(ip->i_dev), (long)(bno + i),
				    fs->fs_fsmnt);
				panic("ffs_blkfree: freeing free frag");
			}
			setbit(blksfree, bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

#ifdef DIAGNOSTIC
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(ip, bno, size)
	struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("bsize = %ld, size = %ld, fs = %s\n",
		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_checkblk: bad size");
	}
	if ((u_int)bno >= fs->fs_size)
		panic("ffs_checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		panic("ffs_checkblk: cg bread failed");
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		panic("ffs_checkblk: cg magic mismatch");
	bp->b_xflags |= BX_BKGRDWRITE;
	blksfree = cg_blksfree(cgp);
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, blksfree, fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(blksfree, bno + i))
				free++;
		if (free != 0 && free != frags)
			panic("ffs_checkblk: partially free fragment");
	}
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */

/*
 * Free an inode.
 */
int
ffs_vfree(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{
	if (DOINGSOFTDEP(pvp)) {
		softdep_freefile(pvp, ino, mode);
		return (0);
	}
	return (ffs_freefile(pvp, ino, mode));
}

/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 */
int
ffs_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	struct buf *bp;
	int error, cg;
	u_int8_t *inosused;

	pip = VTOI(pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ffs_vfree: range: dev = (%d,%d), ino = %d, fs = %s",
		    major(pip->i_dev), minor(pip->i_dev), ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	ino %= fs->fs_ipg;
	if (isclr(inosused, ino)) {
		printf("dev = %s, ino = %lu, fs = %s\n",
		    devtoname(pip->i_dev), (u_long)ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ffs_vfree: freeing free inode");
	}
	clrbit(inosused, ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
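 *
 * The byte-level scan relies on fragtbl[fs_frag], which encodes for
 * each possible map byte the sizes of the free fragment runs that the
 * byte contains; together with the mask handed to scanc() this rejects
 * whole bytes at a time, before the bit-level sifting with the
 * around[] and inside[] masks pins down the exact fragment.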
 */
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
	int allocsiz;
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	u_int8_t *blksfree;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	blksfree = cg_blksfree(cgp);
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&blksfree[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&blksfree[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, blksfree, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
static void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
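	 * This mirrors the forward scan above, walking bits downward
	 * from blkno - 1.  For example, freeing a block that lies
	 * between a free run of 2 and a free run of 3 yields back = 2
	 * and forw = 3, so sump[6] (capped at fs_contigsumsize) is
	 * credited and sump[2] and sump[3] are debited below.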
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{
	struct proc *p = curproc;	/* XXX */

	log(LOG_ERR, "pid %d (%s), uid %d on %s: %s\n", p ? p->p_pid : -1,
	    p ? p->p_comm : "-", uid, fs->fs_fsmnt, cp);
}