/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 * $FreeBSD$
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

typedef ufs_daddr_t allocfcn_t __P((struct inode *ip, int cg,
	    ufs_daddr_t bpref, int size));

static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t
	    ffs_alloccgblk __P((struct inode *, struct buf *, ufs_daddr_t));
#ifdef DIAGNOSTIC
static int	ffs_checkblk __P((struct inode *, ufs_daddr_t, long));
#endif
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t,
	    int));
static ino_t	ffs_dirpref __P((struct inode *));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void	ffs_fserr __P((struct fs *, u_int, char *));
static u_long	ffs_hashalloc
		    __P((struct inode *, int, long, int, allocfcn_t *));
static ino_t	ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t,
	    int));

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	ufs_daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	ufs_daddr_t *bnp;
{
	register struct fs *fs;
	ufs_daddr_t bno;
	int cg;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
#ifdef QUOTA
	error = chkdq(ip, (long)btodb(size), cred, 0);
	if (error)
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
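/*
 * Note on the reserve check above (a sketch with illustrative numbers,
 * not taken from any particular filesystem): freespace(fs, fs_minfree)
 * yields the number of free fragments available to ordinary users once
 * the fs_minfree percentage is held back.  With an 8% reserve on a
 * 1,000,000-fragment data area, non-superuser writes start failing with
 * ENOSPC once fewer than roughly 80,000 fragments remain free; only the
 * superuser may dip into the reserve.
 */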
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	ufs_daddr_t lbprev;
	ufs_daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = %s, bsize = %ld, bprev = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (long)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, (long)btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(ITOV(ip)))
			ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
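/*
 * A worked example of the SPACE/TIME hysteresis above (illustrative
 * numbers): with fs_minfree = 8 and fs_dsize = 1,000,000 fragments,
 * the allocator switches from SPACE to TIME once more than 40,000
 * fragments (half the 8% reserve) sit in partial blocks, and switches
 * back to SPACE once fragmentation reaches 60,000 fragments (within
 * 2% of the reserve).  The gap between the two thresholds keeps the
 * policy from flapping on a filesystem hovering near either limit.
 */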
/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs_daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
		    start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			UFS_UPDATE(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ip,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}
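/*
 * Both halves of this machinery can be tuned from userland through the
 * sysctls declared above, e.g. (shell usage, for illustration):
 *
 *	sysctl vfs.ffs.doreallocblks=0	# disable cluster reallocation
 *	sysctl vfs.ffs.doasyncfree=0	# force synchronous metadata writes
 */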
/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	ino_t ino, ipref;
	int cg, error;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of dirs created one after another
	 * in the same cg without intervening allocation of files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode,
	    (allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, vpp);
	if (error) {
		UFS_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = random() / 2 + 1;
	return (0);
noinodes:
	ffs_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its file inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	register struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	int avgifree, avgbfree, avgndir, curdirsize;
	int minifree, minbfree, maxndir;
	int mincg, minndir;
	int maxcontigdirs;

	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	if (ITOV(pip)->v_flag & VROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Compute various limits used for the
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - fs->fs_ipg / 4;
	if (minifree < 0)
		minifree = 0;
	minbfree = avgbfree - fs->fs_fpg / fs->fs_frag / 4;
	if (minbfree < 0)
		minbfree = 0;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	maxcontigdirs = min(cgsize / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
		    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}
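/*
 * A sketch of the maxcontigdirs computation, with made-up parameters
 * and ignoring the curdirsize correction: suppose fs_fsize = 2048 and
 * fs_fpg = 32768 (so cgsize = 64MB), fs_avgfilesize = 16384 and
 * fs_avgfpdir = 64.  An average directory is then expected to consume
 * dirsize = 16384 * 64 = 1MB, so at most 64 directories (further capped
 * by fs_ipg / fs_avgfpdir and by 255) are placed consecutively in one
 * cylinder group; the running fs_contigdirs[cg] count maintained in
 * ffs_valloc() above enforces that limit.
 */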
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_daddr_t lbn;
	int indx;
	ufs_daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig ||
	    bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	/*
	 * Here we convert ms of delay to frags as:
	 * (frags) = (ms) * (rev/sec) * (sect/rev) /
	 *	((sect/frag) * (ms/sec))
	 * then round up to the next block.
	 */
	nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
	    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
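/*
 * To make the ms-to-frags conversion above concrete (with invented
 * drive parameters): fs_rotdelay = 4 ms, fs_rps = 60 rev/sec,
 * fs_nsect = 32 sect/rev and NSPF(fs) = 2 sect/frag give
 *	4 * 60 * 32 / (2 * 1000) = 3 frags (integer division),
 * and roundup(3, fs_frag) with fs_frag = 8 advances that to 8 frags,
 * i.e. the preference lands one full block past nextblk.
 */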
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	allocfcn_t *allocator;
{
	register struct fs *fs;
	long result;	/* XXX why not same type as we return? */
	int i, icg = cg;

#ifdef DIAGNOSTIC
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
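/*
 * For illustration, with icg = 10 and fs_ncg = 32 the probe order of
 * ffs_hashalloc() above is:
 *	pass 1: cg 10 (the preferred group);
 *	pass 2: cg 11, 13, 17, 25, 9 (cg += i with i doubling, mod 32);
 *	pass 3: cg 12, 13, ..., 31, 0, ..., 9 (linear sweep from icg + 2);
 * so every cylinder group is examined at least once before giving up.
 */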
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i)) {
			brelse(bp);
			return (0);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, bprev);
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	ufs_daddr_t bno, blkno;
	int allocsiz, error, frags;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(blksfree, bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (0);
	}
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	blkno = cg * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	bdwrite(bp);
	return ((u_long)blkno);
}
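/*
 * Example of the cg_frsum bookkeeping above, assuming fs_frag = 8:
 * a 3-fragment request that finds no suitable partial block carves up
 * a full block, leaving a free run of 5 frags (cg_frsum[5]++).  If a
 * later 2-fragment request is carved from that run via ffs_mapsearch(),
 * cg_frsum[5]-- and cg_frsum[3]++, since a 3-frag tail remains free.
 */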
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(ip, bp, bpref)
	struct inode *ip;
	struct buf *bp;
	ufs_daddr_t bpref;
{
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, blksfree, bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	blkno = cgp->cg_cgx * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	return (blkno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
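/*
 * Reading cg_clustersum, for illustration: with fs_contigsumsize = 4,
 * a summary of {0, 3, 1, 0, 2} for entries [0..4] means the free map
 * holds three runs of exactly 1 block, one run of exactly 2, and two
 * runs of 4 or more.  A request for len = 3 scans entries [3..4],
 * finds the nonzero entry at [4], and proceeds to the bitmap walk
 * above to locate the actual run.
 */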
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	ufs_daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	u_int8_t *inosused;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(inosused, ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &inosused[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &inosused[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %ld, fs = %s\n",
			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = inosused[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
	setbit(inosused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
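/*
 * To illustrate the bitmap scan above: skpc(0xff, len, ...) skips
 * bytes in which all NBBY (8) inodes are in use and returns how many
 * bytes remain, so `start + len - loc' indexes the first byte with a
 * clear bit.  If that byte is 0xb7 (binary 10110111), the bit loop
 * stops at bit 3, and the allocated inode is byte index * NBBY + 3.
 */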
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	u_int8_t *blksfree;
#ifdef DIAGNOSTIC
	struct vnode *vp;
#endif

	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((vp = ITOV(ip)) != NULL && vp->v_mount != NULL &&
	    (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED))
		panic("ffs_blkfree: deallocation on suspended filesystem");
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		printf("dev=%s, bno = %ld, bsize = %ld, size = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)bno, (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_blkfree: bad size");
	}
#endif
	if ((ip->i_devvp->v_flag & VCOPYONWRITE) &&
	    ffs_snapblkfree(ip, bno, size))
		return;
	VOP_FREEBLKS(ip->i_devvp, fsbtodb(fs, bno), size);
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %ld, ino %lu\n",
		    (long)bno, (u_long)ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	cgbno = dtogd(fs, bno);
	blksfree = cg_blksfree(cgp);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
			printf("dev = %s, block = %ld, fs = %s\n",
			    devtoname(ip->i_dev), (long)bno, fs->fs_fsmnt);
			panic("ffs_blkfree: freeing free block");
		}
		ffs_setblock(fs, blksfree, fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, cgbno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, cgbno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(blksfree, cgbno + i)) {
				printf("dev = %s, block = %ld, fs = %s\n",
				    devtoname(ip->i_dev), (long)(bno + i),
				    fs->fs_fsmnt);
				panic("ffs_blkfree: freeing free frag");
			}
			setbit(blksfree, cgbno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, fragno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

#ifdef DIAGNOSTIC
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(ip, bno, size)
	struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("bsize = %ld, size = %ld, fs = %s\n",
		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_checkblk: bad size");
	}
	if ((u_int)bno >= fs->fs_size)
		panic("ffs_checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		panic("ffs_checkblk: cg bread failed");
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		panic("ffs_checkblk: cg magic mismatch");
	bp->b_xflags |= BX_BKGRDWRITE;
	blksfree = cg_blksfree(cgp);
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, blksfree, fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(blksfree, bno + i))
				free++;
		if (free != 0 && free != frags)
			panic("ffs_checkblk: partially free fragment");
	}
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */

/*
 * Free an inode.
 */
int
ffs_vfree(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{
	if (DOINGSOFTDEP(pvp)) {
		softdep_freefile(pvp, ino, mode);
		return (0);
	}
	return (ffs_freefile(VTOI(pvp), ino, mode));
}
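/*
 * Fragment reassembly in ffs_blkfree() above, by example (fs_frag = 8):
 * freeing 3 frags adjacent to an already-free run of 5 in the same
 * block first retires the old run from cg_frsum (ffs_fragacct(..., -1)),
 * sets the 3 bits, and recounts (ffs_fragacct(..., 1)).  All 8 bits of
 * the block are now set, so ffs_isblock() succeeds and the 8 frags are
 * converted from the cs_nffree totals into one cs_nbfree block, with
 * the cluster map credited via ffs_clusteracct().
 */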
/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 */
int
ffs_freefile(pip, ino, mode)
	struct inode *pip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg;
	u_int8_t *inosused;

	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ffs_vfree: range: dev = (%d,%d), ino = %d, fs = %s",
		    major(pip->i_dev), minor(pip->i_dev), ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	ino %= fs->fs_ipg;
	if (isclr(inosused, ino)) {
		printf("dev = %s, ino = %lu, fs = %s\n", devtoname(pip->i_dev),
		    (u_long)ino + cg * fs->fs_ipg, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ffs_vfree: freeing free inode");
	}
	clrbit(inosused, ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
	int allocsiz;
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	u_int8_t *blksfree;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	blksfree = cg_blksfree(cgp);
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&blksfree[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&blksfree[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, blksfree, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{
	struct proc *p = curproc;	/* XXX */

	log(LOG_ERR, "pid %d (%s), uid %d on %s: %s\n", p ? p->p_pid : -1,
	    p ? p->p_comm : "-", uid, fs->fs_fsmnt, cp);
}
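/*
 * Worked example for ffs_clusteracct() above: freeing block b when
 * blocks b-2..b-1 and b+1 are already free joins a 2-run, the new
 * block, and a 1-run into one 4-run, so sump[4]++ (cnt == 1) while
 * sump[2]-- and sump[1]-- retire the two runs that no longer exist.
 * Allocating the same block reverses all three adjustments (cnt == -1).
 */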
/*
 * This function provides the capability for the fsck program to
 * update an active filesystem. Six operations are provided:
 *
 * adjrefcnt(inode, amt) - adjusts the reference count on the
 *	specified inode by the specified amount. Under normal
 *	operation the count should always go down. Decrementing
 *	the count to zero will cause the inode to be freed.
 * adjblkcnt(inode, amt) - adjusts the block count on the specified
 *	inode by the specified amount.
 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
 *	are marked as free. Inodes should never have to be marked
 *	as in use.
 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
 *	are marked as free. Inodes should never have to be marked
 *	as in use.
 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
 *	are marked as free. Blocks should never have to be marked
 *	as in use.
 * setflags(flags, set/clear) - the fs_flags field has the specified
 *	flags set (second parameter +1) or cleared (second parameter -1).
 */

static int sysctl_ffs_fsck __P((SYSCTL_HANDLER_ARGS));

SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");

SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");

SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
	sysctl_ffs_fsck, "Free Range of Directory Inodes");

SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
	sysctl_ffs_fsck, "Free Range of File Inodes");

SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
	sysctl_ffs_fsck, "Free Range of Blocks");

SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
	sysctl_ffs_fsck, "Change Filesystem Flags");

#ifdef DEBUG
static int fsckcmds = 0;
SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
#endif /* DEBUG */

static int
sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
{
	struct fsck_cmd cmd;
	struct inode tip;
	struct ufsmount *ump;
	struct vnode *vp;
	struct inode *ip;
	struct mount *mp;
	struct fs *fs;
	ufs_daddr_t blkno;
	long blkcnt, blksize;
	struct file *fp;
	int filetype, error;

	if (req->newlen > sizeof cmd)
		return (EBADRPC);
	if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
		return (error);
	if (cmd.version != FFS_CMD_VERSION)
		return (ERPCMISMATCH);
	if ((error = getvnode(curproc->p_fd, cmd.handle, &fp)) != 0)
		return (error);
	vn_start_write((struct vnode *)fp->f_data, &mp, V_WAIT);
	if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
		vn_finished_write(mp);
		return (EINVAL);
	}
	if (mp->mnt_flag & MNT_RDONLY) {
		vn_finished_write(mp);
		return (EROFS);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	filetype = IFREG;

	switch (oidp->oid_number) {

	case FFS_SET_FLAGS:
#ifdef DEBUG
		if (fsckcmds)
			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
"set" : "clear"); 1914 #endif /* DEBUG */ 1915 if (cmd.size > 0) 1916 fs->fs_flags |= (long)cmd.value; 1917 else 1918 fs->fs_flags &= ~(long)cmd.value; 1919 break; 1920 1921 case FFS_ADJ_REFCNT: 1922 #ifdef DEBUG 1923 if (fsckcmds) { 1924 printf("%s: adjust inode %d count by %ld\n", 1925 mp->mnt_stat.f_mntonname, (ino_t)cmd.value, 1926 cmd.size); 1927 } 1928 #endif /* DEBUG */ 1929 if ((error = VFS_VGET(mp, (ino_t)cmd.value, &vp)) != 0) 1930 break; 1931 ip = VTOI(vp); 1932 ip->i_nlink += cmd.size; 1933 ip->i_effnlink += cmd.size; 1934 ip->i_flag |= IN_CHANGE; 1935 if (DOINGSOFTDEP(vp)) 1936 softdep_change_linkcnt(ip); 1937 vput(vp); 1938 break; 1939 1940 case FFS_ADJ_BLKCNT: 1941 #ifdef DEBUG 1942 if (fsckcmds) { 1943 printf("%s: adjust inode %d block count by %ld\n", 1944 mp->mnt_stat.f_mntonname, (ino_t)cmd.value, 1945 cmd.size); 1946 } 1947 #endif /* DEBUG */ 1948 if ((error = VFS_VGET(mp, (ino_t)cmd.value, &vp)) != 0) 1949 break; 1950 ip = VTOI(vp); 1951 ip->i_blocks += cmd.size; 1952 ip->i_flag |= IN_CHANGE; 1953 vput(vp); 1954 break; 1955 1956 case FFS_DIR_FREE: 1957 filetype = IFDIR; 1958 /* fall through */ 1959 1960 case FFS_FILE_FREE: 1961 #ifdef DEBUG 1962 if (fsckcmds) { 1963 if (cmd.size == 1) 1964 printf("%s: free %s inode %d\n", 1965 mp->mnt_stat.f_mntonname, 1966 filetype == IFDIR ? "directory" : "file", 1967 (ino_t)cmd.value); 1968 else 1969 printf("%s: free %s inodes %d-%d\n", 1970 mp->mnt_stat.f_mntonname, 1971 filetype == IFDIR ? "directory" : "file", 1972 (ino_t)cmd.value, 1973 (ino_t)(cmd.value + cmd.size - 1)); 1974 } 1975 #endif /* DEBUG */ 1976 tip.i_devvp = ump->um_devvp; 1977 tip.i_dev = ump->um_dev; 1978 tip.i_fs = fs; 1979 while (cmd.size > 0) { 1980 if ((error = ffs_freefile(&tip, cmd.value, filetype))) 1981 break; 1982 cmd.size -= 1; 1983 cmd.value += 1; 1984 } 1985 break; 1986 1987 case FFS_BLK_FREE: 1988 #ifdef DEBUG 1989 if (fsckcmds) { 1990 if (cmd.size == 1) 1991 printf("%s: free block %d\n", 1992 mp->mnt_stat.f_mntonname, 1993 (ufs_daddr_t)cmd.value); 1994 else 1995 printf("%s: free blocks %d-%ld\n", 1996 mp->mnt_stat.f_mntonname, 1997 (ufs_daddr_t)cmd.value, 1998 (ufs_daddr_t)cmd.value + cmd.size - 1); 1999 } 2000 #endif /* DEBUG */ 2001 tip.i_number = ROOTINO; 2002 tip.i_devvp = ump->um_devvp; 2003 tip.i_dev = ump->um_dev; 2004 tip.i_fs = fs; 2005 tip.i_size = cmd.size * fs->fs_fsize; 2006 tip.i_uid = 0; 2007 tip.i_vnode = NULL; 2008 blkno = (ufs_daddr_t)cmd.value; 2009 blkcnt = cmd.size; 2010 blksize = fs->fs_frag - (blkno % fs->fs_frag); 2011 while (blkcnt > 0) { 2012 if (blksize > blkcnt) 2013 blksize = blkcnt; 2014 ffs_blkfree(&tip, blkno, blksize * fs->fs_fsize); 2015 blkno += blksize; 2016 blkcnt -= blksize; 2017 blksize = fs->fs_frag; 2018 } 2019 break; 2020 2021 default: 2022 #ifdef DEBUG 2023 if (fsckcmds) { 2024 printf("Invalid request %d from fsck\n", 2025 oidp->oid_number); 2026 } 2027 #endif /* DEBUG */ 2028 error = EINVAL; 2029 break; 2030 2031 } 2032 vn_finished_write(mp); 2033 return (error); 2034 } 2035