/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
		    int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	    ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static void	ffs_fserr(struct fs *, ino_t, char *);
static ufs2_daddr_t	ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
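 *
 * As an illustrative (hypothetical) example of the rehash step: with the
 * preferred cylinder group being 5 on a filesystem with 32 cylinder
 * groups, ffs_hashalloc() probes cg 5 first, then cgs 6, 8, 12, 20, ...
 * (adding 1, 2, 4, 8, ... modulo the number of cylinder groups), and
 * finally falls back to a brute-force scan of the remaining groups.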
 */
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
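 *
 * A purely hypothetical example: on a filesystem with 2048-byte
 * fragments, growing the last fragment of a file from 2048 to 6144
 * bytes first tries ffs_fragextend() to claim the two adjacent free
 * fragments in place; if they are taken, a new 6144-byte location is
 * obtained through ffs_hashalloc() and the old fragment is freed.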
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	*bpp = 0;
	vp = ITOV(ip);
	fs = ip->i_fs;
	bp = NULL;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread(vp, lbprev, osize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero(bp->b_data + osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
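		 *
		 * For example (hypothetical numbers): with fs_minfree at
		 * 8%, space optimization is kept while cs_nffree exceeds
		 * fs_dsize * 8 / (2 * 100); once the free fragment count
		 * falls to 4% of fs_dsize or below, the test below flips
		 * fs_optim to FS_OPTTIME. Filesystems with fs_minfree of
		 * 5% or less never leave space optimization here.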
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
			    ip->i_number, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero(bp->b_data + osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * no space available
	 */
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * to the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success in doing the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather the previous block
 * allocation will be used.
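 *
 * For example (hypothetical layout): if eight dirty buffers for logical
 * blocks 100-107 sit in scattered physical locations within one cylinder
 * group, a successful pass finds an eight-block contiguous run near the
 * block allocated for logical block 99, rewrites the block pointers, and
 * frees the old locations; on failure the buffers simply keep their
 * existing placement.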
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{

	if (doreallocblks == 0)
		return (ENOSPC);
	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here. Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		    dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	ump = pip->i_ump;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of directories created one after another
	 * in the same cylinder group without intervening file creations.
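	 * The fs_contigdirs[] count is consumed by ffs_dirpref() above:
	 * once it reaches the computed maxcontigdirs limit for a group,
	 * new directories are steered toward a different cylinder group.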
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
	    (allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random() / 2 + 1;
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ip->i_flag = 0;
	vnode_destroy_vobject(*vpp);
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		(*vpp)->v_op = &ffs_vnodeops2;
	else
		(*vpp)->v_op = &ffs_vnodeops1;
	return (0);
noinodes:
	if (fs->fs_pendinginodes > 0 && reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	u_int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Count various limits which are used for
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
		    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups.
 * When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is laid out
 * contiguously if possible.
 */
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg;
	u_int avgbfree, startcg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (cgbase(fs, cg) + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Same as above, but for UFS2
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg;
	u_int avgbfree, startcg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (cgbase(fs, cg) + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held. Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 *	Note that we start at i == 2, since 0 was checked initially,
	 *	and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
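 *
 * A hypothetical illustration (assuming fs_frag of 8): extending a
 * fragment from 2 to 5 frags within its block succeeds only when the 3
 * fragments that immediately follow it in cg_blksfree() are still free;
 * the extension is never allowed to cross into the next block.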
 */
static ufs2_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bprev;
	int osize, nsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int nffree;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i))
			goto fail;
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		nffree++;
	}
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= nffree;
	fs->fs_cs(fs, cg).cs_nffree -= nffree;
	fs->fs_fmod = 1;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
		    frags, numfrags(fs, osize));
	bdwrite(bp);
	return (bprev);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);

}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
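 *
 * For a fragment-sized request (say 3 frags, purely as an example), the
 * cg_frsum[] table is scanned for the smallest free piece of at least
 * that size; only if no such piece exists is a whole block taken from
 * the group and split, with the leftover fragments returned to the map.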
 */
static ufs2_daddr_t
ffs_alloccg(ip, cg, bpref, size, rsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int size;
	int rsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	int i, allocsiz, error, frags;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0)
			goto fail;
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	KASSERT(size == rsize,
	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0)
		goto fail;
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cg) + bno;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
	bdwrite(bp);
	return (blkno);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
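 *
 * For example (hypothetical sizes): if the caller asked for 4096 bytes
 * on a filesystem with 16K blocks and 2K fragments, the routine still
 * claims a whole 8-fragment block and then marks the trailing 6
 * fragments free again, updating cg_frsum[] accordingly.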
 */
static ufs2_daddr_t
ffs_alloccgblk(ip, bp, bpref, size)
	struct inode *ip;
	struct buf *bp;
	ufs2_daddr_t bpref;
	int size;
{
	struct fs *fs;
	struct cg *cgp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	u_int8_t *blksfree;
	int i;

	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available block in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cgp->cg_cgx) + bno;
	/*
	 * If the caller didn't want the whole block, free the frags here.
	 */
	size = numfrags(fs, size);
	if (size != fs->fs_frag) {
		bno = dtogd(fs, blkno);
		for (i = size; i < fs->fs_frag; i++)
			setbit(blksfree, bno + i);
		i = fs->fs_frag - size;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
	}
	/* XXX Fixme. */
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
		    size, 0);
	UFS_LOCK(ump);
	return (blkno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs2_daddr_t
ffs_clusteralloc(ip, cg, bpref, len, unused)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int len;
	int unused;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int i, run, bit, map, got;
	ufs2_daddr_t bno;
	u_char *mapp;
	int32_t *lp;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	UFS_UNLOCK(ump);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail_lock;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail_lock;
	bp->b_xflags |= BX_BKGRDWRITE;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
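		 * (fs_maxcluster[] acts as a per-cg cache: once the
		 * largest run really available here is known, later
		 * requests for anything bigger are rejected up front
		 * without re-reading the cg map.)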
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		UFS_LOCK(ump);
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail_lock;
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	UFS_LOCK(ump);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
			panic("ffs_clusteralloc: lost block");
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	bdwrite(bp);
	return (bno);

fail_lock:
	UFS_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
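 *
 * The search of cg_inosused() starts at the group's inode rotor and
 * uses skpc() to skip over fully used (0xff) bytes of the bitmap, so a
 * hypothetical request whose ipref is already taken typically lands on
 * the first clear bit at or after cg_irotor rather than rescanning the
 * whole map from the start.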
 */
static ufs2_daddr_t
ffs_nodealloccg(ip, cg, ipref, mode, unused)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t ipref;
	int mode;
	int unused;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp, *ibp;
	struct ufsmount *ump;
	u_int8_t *inosused;
	struct ufs2_dinode *dp2;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		UFS_LOCK(ump);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		UFS_LOCK(ump);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(inosused, ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &inosused[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &inosused[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %ld, fs = %s\n",
			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = inosused[i] ^ 0xff;
	if (map == 0) {
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("ffs_nodealloccg: block not in map");
	}
	ipref = i * NBBY + ffs(map) - 1;
	cgp->cg_irotor = ipref;
gotit:
	/*
	 * Check to see if we need to initialize more inodes.
	 */
	ibp = NULL;
	if (fs->fs_magic == FS_UFS2_MAGIC &&
	    ipref + INOPB(fs) > cgp->cg_initediblk &&
	    cgp->cg_initediblk < cgp->cg_niblk) {
		ibp = getblk(ip->i_devvp, fsbtodb(fs,
		    ino_to_fsba(fs, cg * fs->fs_ipg + cgp->cg_initediblk)),
		    (int)fs->fs_bsize, 0, 0, 0);
		bzero(ibp->b_data, (int)fs->fs_bsize);
		dp2 = (struct ufs2_dinode *)(ibp->b_data);
		for (i = 0; i < INOPB(fs); i++) {
			dp2->di_gen = arc4random() / 2 + 1;
			dp2++;
		}
		cgp->cg_initediblk += INOPB(fs);
	}
	UFS_LOCK(ump);
	ACTIVECLEAR(fs, cg);
	setbit(inosused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
	bdwrite(bp);
	if (ibp != NULL)
		bawrite(ibp);
	return ((ino_t)(cg * fs->fs_ipg + ipref));
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
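 *
 * As a hypothetical example: freeing the last allocated fragment of an
 * 8-fragment block leaves all eight map bits set, so the per-cg
 * fragment counts are rolled back by fs_frag and the block is instead
 * counted in cs_nbfree and in the cluster map.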
1845 */ 1846 static void 1847 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd) 1848 struct ufsmount *ump; 1849 struct fs *fs; 1850 struct vnode *devvp; 1851 ufs2_daddr_t bno; 1852 long size; 1853 ino_t inum; 1854 struct workhead *dephd; 1855 { 1856 struct mount *mp; 1857 struct cg *cgp; 1858 struct buf *bp; 1859 ufs1_daddr_t fragno, cgbno; 1860 ufs2_daddr_t cgblkno; 1861 int i, blk, frags, bbase; 1862 u_int cg; 1863 u_int8_t *blksfree; 1864 struct cdev *dev; 1865 1866 cg = dtog(fs, bno); 1867 if (devvp->v_type == VREG) { 1868 /* devvp is a snapshot */ 1869 dev = VTOI(devvp)->i_devvp->v_rdev; 1870 cgblkno = fragstoblks(fs, cgtod(fs, cg)); 1871 } else { 1872 /* devvp is a normal disk device */ 1873 dev = devvp->v_rdev; 1874 cgblkno = fsbtodb(fs, cgtod(fs, cg)); 1875 ASSERT_VOP_LOCKED(devvp, "ffs_blkfree"); 1876 if ((devvp->v_vflag & VV_COPYONWRITE) && 1877 ffs_snapblkfree(fs, devvp, bno, size, inum)) 1878 return; 1879 } 1880 #ifdef INVARIANTS 1881 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 || 1882 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) { 1883 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n", 1884 devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize, 1885 size, fs->fs_fsmnt); 1886 panic("ffs_blkfree: bad size"); 1887 } 1888 #endif 1889 if ((u_int)bno >= fs->fs_size) { 1890 printf("bad block %jd, ino %lu\n", (intmax_t)bno, 1891 (u_long)inum); 1892 ffs_fserr(fs, inum, "bad block"); 1893 return; 1894 } 1895 if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) { 1896 brelse(bp); 1897 return; 1898 } 1899 cgp = (struct cg *)bp->b_data; 1900 if (!cg_chkmagic(cgp)) { 1901 brelse(bp); 1902 return; 1903 } 1904 bp->b_xflags |= BX_BKGRDWRITE; 1905 cgp->cg_old_time = cgp->cg_time = time_second; 1906 cgbno = dtogd(fs, bno); 1907 blksfree = cg_blksfree(cgp); 1908 UFS_LOCK(ump); 1909 if (size == fs->fs_bsize) { 1910 fragno = fragstoblks(fs, cgbno); 1911 if (!ffs_isfreeblock(fs, blksfree, fragno)) { 1912 if (devvp->v_type == VREG) { 1913 UFS_UNLOCK(ump); 1914 /* devvp is a snapshot */ 1915 brelse(bp); 1916 return; 1917 } 1918 printf("dev = %s, block = %jd, fs = %s\n", 1919 devtoname(dev), (intmax_t)bno, fs->fs_fsmnt); 1920 panic("ffs_blkfree: freeing free block"); 1921 } 1922 ffs_setblock(fs, blksfree, fragno); 1923 ffs_clusteracct(fs, cgp, fragno, 1); 1924 cgp->cg_cs.cs_nbfree++; 1925 fs->fs_cstotal.cs_nbfree++; 1926 fs->fs_cs(fs, cg).cs_nbfree++; 1927 } else { 1928 bbase = cgbno - fragnum(fs, cgbno); 1929 /* 1930 * decrement the counts associated with the old frags 1931 */ 1932 blk = blkmap(fs, blksfree, bbase); 1933 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 1934 /* 1935 * deallocate the fragment 1936 */ 1937 frags = numfrags(fs, size); 1938 for (i = 0; i < frags; i++) { 1939 if (isset(blksfree, cgbno + i)) { 1940 printf("dev = %s, block = %jd, fs = %s\n", 1941 devtoname(dev), (intmax_t)(bno + i), 1942 fs->fs_fsmnt); 1943 panic("ffs_blkfree: freeing free frag"); 1944 } 1945 setbit(blksfree, cgbno + i); 1946 } 1947 cgp->cg_cs.cs_nffree += i; 1948 fs->fs_cstotal.cs_nffree += i; 1949 fs->fs_cs(fs, cg).cs_nffree += i; 1950 /* 1951 * add back in counts associated with the new frags 1952 */ 1953 blk = blkmap(fs, blksfree, bbase); 1954 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 1955 /* 1956 * if a complete block has been reassembled, account for it 1957 */ 1958 fragno = fragstoblks(fs, bbase); 1959 if (ffs_isblock(fs, blksfree, fragno)) { 1960 cgp->cg_cs.cs_nffree -= fs->fs_frag; 1961 fs->fs_cstotal.cs_nffree -= fs->fs_frag; 1962 fs->fs_cs(fs, cg).cs_nffree -= 
fs->fs_frag; 1963 ffs_clusteracct(fs, cgp, fragno, 1); 1964 cgp->cg_cs.cs_nbfree++; 1965 fs->fs_cstotal.cs_nbfree++; 1966 fs->fs_cs(fs, cg).cs_nbfree++; 1967 } 1968 } 1969 fs->fs_fmod = 1; 1970 ACTIVECLEAR(fs, cg); 1971 UFS_UNLOCK(ump); 1972 mp = UFSTOVFS(ump); 1973 if (mp->mnt_flag & MNT_SOFTDEP && devvp->v_type != VREG) 1974 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno, 1975 numfrags(fs, size), dephd); 1976 bdwrite(bp); 1977 } 1978 1979 TASKQUEUE_DEFINE_THREAD(ffs_trim); 1980 1981 struct ffs_blkfree_trim_params { 1982 struct task task; 1983 struct ufsmount *ump; 1984 struct vnode *devvp; 1985 ufs2_daddr_t bno; 1986 long size; 1987 ino_t inum; 1988 struct workhead *pdephd; 1989 struct workhead dephd; 1990 }; 1991 1992 static void 1993 ffs_blkfree_trim_task(ctx, pending) 1994 void *ctx; 1995 int pending; 1996 { 1997 struct ffs_blkfree_trim_params *tp; 1998 1999 tp = ctx; 2000 ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size, 2001 tp->inum, tp->pdephd); 2002 vn_finished_secondary_write(UFSTOVFS(tp->ump)); 2003 free(tp, M_TEMP); 2004 } 2005 2006 static void 2007 ffs_blkfree_trim_completed(bip) 2008 struct bio *bip; 2009 { 2010 struct ffs_blkfree_trim_params *tp; 2011 2012 tp = bip->bio_caller2; 2013 g_destroy_bio(bip); 2014 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp); 2015 taskqueue_enqueue(taskqueue_ffs_trim, &tp->task); 2016 } 2017 2018 void 2019 ffs_blkfree(ump, fs, devvp, bno, size, inum, dephd) 2020 struct ufsmount *ump; 2021 struct fs *fs; 2022 struct vnode *devvp; 2023 ufs2_daddr_t bno; 2024 long size; 2025 ino_t inum; 2026 struct workhead *dephd; 2027 { 2028 struct mount *mp; 2029 struct bio *bip; 2030 struct ffs_blkfree_trim_params *tp; 2031 2032 if (!ump->um_candelete) { 2033 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd); 2034 return; 2035 } 2036 2037 /* 2038 * Postpone the set of the free bit in the cg bitmap until the 2039 * BIO_DELETE is completed. Otherwise, due to disk queue 2040 * reordering, TRIM might be issued after we reuse the block 2041 * and write some new data into it. 2042 */ 2043 tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK); 2044 tp->ump = ump; 2045 tp->devvp = devvp; 2046 tp->bno = bno; 2047 tp->size = size; 2048 tp->inum = inum; 2049 if (dephd != NULL) { 2050 LIST_INIT(&tp->dephd); 2051 LIST_SWAP(dephd, &tp->dephd, worklist, wk_list); 2052 tp->pdephd = &tp->dephd; 2053 } else 2054 tp->pdephd = NULL; 2055 2056 bip = g_alloc_bio(); 2057 bip->bio_cmd = BIO_DELETE; 2058 bip->bio_offset = dbtob(fsbtodb(fs, bno)); 2059 bip->bio_done = ffs_blkfree_trim_completed; 2060 bip->bio_length = size; 2061 bip->bio_caller2 = tp; 2062 2063 mp = UFSTOVFS(ump); 2064 vn_start_secondary_write(NULL, &mp, 0); 2065 g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private); 2066 } 2067 2068 #ifdef INVARIANTS 2069 /* 2070 * Verify allocation of a block or fragment. Returns true if block or 2071 * fragment is allocated, false if it is free. 
2072 */ 2073 static int 2074 ffs_checkblk(ip, bno, size) 2075 struct inode *ip; 2076 ufs2_daddr_t bno; 2077 long size; 2078 { 2079 struct fs *fs; 2080 struct cg *cgp; 2081 struct buf *bp; 2082 ufs1_daddr_t cgbno; 2083 int i, error, frags, free; 2084 u_int8_t *blksfree; 2085 2086 fs = ip->i_fs; 2087 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) { 2088 printf("bsize = %ld, size = %ld, fs = %s\n", 2089 (long)fs->fs_bsize, size, fs->fs_fsmnt); 2090 panic("ffs_checkblk: bad size"); 2091 } 2092 if ((u_int)bno >= fs->fs_size) 2093 panic("ffs_checkblk: bad block %jd", (intmax_t)bno); 2094 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))), 2095 (int)fs->fs_cgsize, NOCRED, &bp); 2096 if (error) 2097 panic("ffs_checkblk: cg bread failed"); 2098 cgp = (struct cg *)bp->b_data; 2099 if (!cg_chkmagic(cgp)) 2100 panic("ffs_checkblk: cg magic mismatch"); 2101 bp->b_xflags |= BX_BKGRDWRITE; 2102 blksfree = cg_blksfree(cgp); 2103 cgbno = dtogd(fs, bno); 2104 if (size == fs->fs_bsize) { 2105 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno)); 2106 } else { 2107 frags = numfrags(fs, size); 2108 for (free = 0, i = 0; i < frags; i++) 2109 if (isset(blksfree, cgbno + i)) 2110 free++; 2111 if (free != 0 && free != frags) 2112 panic("ffs_checkblk: partially free fragment"); 2113 } 2114 brelse(bp); 2115 return (!free); 2116 } 2117 #endif /* INVARIANTS */ 2118 2119 /* 2120 * Free an inode. 2121 */ 2122 int 2123 ffs_vfree(pvp, ino, mode) 2124 struct vnode *pvp; 2125 ino_t ino; 2126 int mode; 2127 { 2128 struct inode *ip; 2129 2130 if (DOINGSOFTDEP(pvp)) { 2131 softdep_freefile(pvp, ino, mode); 2132 return (0); 2133 } 2134 ip = VTOI(pvp); 2135 return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode, 2136 NULL)); 2137 } 2138 2139 /* 2140 * Do the actual free operation. 2141 * The specified inode is placed back in the free map. 
2142 */ 2143 int 2144 ffs_freefile(ump, fs, devvp, ino, mode, wkhd) 2145 struct ufsmount *ump; 2146 struct fs *fs; 2147 struct vnode *devvp; 2148 ino_t ino; 2149 int mode; 2150 struct workhead *wkhd; 2151 { 2152 struct cg *cgp; 2153 struct buf *bp; 2154 ufs2_daddr_t cgbno; 2155 int error; 2156 u_int cg; 2157 u_int8_t *inosused; 2158 struct cdev *dev; 2159 2160 cg = ino_to_cg(fs, ino); 2161 if (devvp->v_type == VREG) { 2162 /* devvp is a snapshot */ 2163 dev = VTOI(devvp)->i_devvp->v_rdev; 2164 cgbno = fragstoblks(fs, cgtod(fs, cg)); 2165 } else { 2166 /* devvp is a normal disk device */ 2167 dev = devvp->v_rdev; 2168 cgbno = fsbtodb(fs, cgtod(fs, cg)); 2169 } 2170 if (ino >= fs->fs_ipg * fs->fs_ncg) 2171 panic("ffs_freefile: range: dev = %s, ino = %lu, fs = %s", 2172 devtoname(dev), (u_long)ino, fs->fs_fsmnt); 2173 if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) { 2174 brelse(bp); 2175 return (error); 2176 } 2177 cgp = (struct cg *)bp->b_data; 2178 if (!cg_chkmagic(cgp)) { 2179 brelse(bp); 2180 return (0); 2181 } 2182 bp->b_xflags |= BX_BKGRDWRITE; 2183 cgp->cg_old_time = cgp->cg_time = time_second; 2184 inosused = cg_inosused(cgp); 2185 ino %= fs->fs_ipg; 2186 if (isclr(inosused, ino)) { 2187 printf("dev = %s, ino = %u, fs = %s\n", devtoname(dev), 2188 ino + cg * fs->fs_ipg, fs->fs_fsmnt); 2189 if (fs->fs_ronly == 0) 2190 panic("ffs_freefile: freeing free inode"); 2191 } 2192 clrbit(inosused, ino); 2193 if (ino < cgp->cg_irotor) 2194 cgp->cg_irotor = ino; 2195 cgp->cg_cs.cs_nifree++; 2196 UFS_LOCK(ump); 2197 fs->fs_cstotal.cs_nifree++; 2198 fs->fs_cs(fs, cg).cs_nifree++; 2199 if ((mode & IFMT) == IFDIR) { 2200 cgp->cg_cs.cs_ndir--; 2201 fs->fs_cstotal.cs_ndir--; 2202 fs->fs_cs(fs, cg).cs_ndir--; 2203 } 2204 fs->fs_fmod = 1; 2205 ACTIVECLEAR(fs, cg); 2206 UFS_UNLOCK(ump); 2207 if (UFSTOVFS(ump)->mnt_flag & MNT_SOFTDEP && devvp->v_type != VREG) 2208 softdep_setup_inofree(UFSTOVFS(ump), bp, 2209 ino + cg * fs->fs_ipg, wkhd); 2210 bdwrite(bp); 2211 return (0); 2212 } 2213 2214 /* 2215 * Check to see if a file is free. 2216 */ 2217 int 2218 ffs_checkfreefile(fs, devvp, ino) 2219 struct fs *fs; 2220 struct vnode *devvp; 2221 ino_t ino; 2222 { 2223 struct cg *cgp; 2224 struct buf *bp; 2225 ufs2_daddr_t cgbno; 2226 int ret; 2227 u_int cg; 2228 u_int8_t *inosused; 2229 2230 cg = ino_to_cg(fs, ino); 2231 if (devvp->v_type == VREG) { 2232 /* devvp is a snapshot */ 2233 cgbno = fragstoblks(fs, cgtod(fs, cg)); 2234 } else { 2235 /* devvp is a normal disk device */ 2236 cgbno = fsbtodb(fs, cgtod(fs, cg)); 2237 } 2238 if (ino >= fs->fs_ipg * fs->fs_ncg) 2239 return (1); 2240 if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) { 2241 brelse(bp); 2242 return (1); 2243 } 2244 cgp = (struct cg *)bp->b_data; 2245 if (!cg_chkmagic(cgp)) { 2246 brelse(bp); 2247 return (1); 2248 } 2249 inosused = cg_inosused(cgp); 2250 ino %= fs->fs_ipg; 2251 ret = isclr(inosused, ino); 2252 brelse(bp); 2253 return (ret); 2254 } 2255 2256 /* 2257 * Find a block of the specified size in the specified cylinder group. 2258 * 2259 * It is a panic if a request is made to find a block if none are 2260 * available. 
2261 */ 2262 static ufs1_daddr_t 2263 ffs_mapsearch(fs, cgp, bpref, allocsiz) 2264 struct fs *fs; 2265 struct cg *cgp; 2266 ufs2_daddr_t bpref; 2267 int allocsiz; 2268 { 2269 ufs1_daddr_t bno; 2270 int start, len, loc, i; 2271 int blk, field, subfield, pos; 2272 u_int8_t *blksfree; 2273 2274 /* 2275 * find the fragment by searching through the free block 2276 * map for an appropriate bit pattern 2277 */ 2278 if (bpref) 2279 start = dtogd(fs, bpref) / NBBY; 2280 else 2281 start = cgp->cg_frotor / NBBY; 2282 blksfree = cg_blksfree(cgp); 2283 len = howmany(fs->fs_fpg, NBBY) - start; 2284 loc = scanc((u_int)len, (u_char *)&blksfree[start], 2285 fragtbl[fs->fs_frag], 2286 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 2287 if (loc == 0) { 2288 len = start + 1; 2289 start = 0; 2290 loc = scanc((u_int)len, (u_char *)&blksfree[0], 2291 fragtbl[fs->fs_frag], 2292 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 2293 if (loc == 0) { 2294 printf("start = %d, len = %d, fs = %s\n", 2295 start, len, fs->fs_fsmnt); 2296 panic("ffs_alloccg: map corrupted"); 2297 /* NOTREACHED */ 2298 } 2299 } 2300 bno = (start + len - loc) * NBBY; 2301 cgp->cg_frotor = bno; 2302 /* 2303 * found the byte in the map 2304 * sift through the bits to find the selected frag 2305 */ 2306 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) { 2307 blk = blkmap(fs, blksfree, bno); 2308 blk <<= 1; 2309 field = around[allocsiz]; 2310 subfield = inside[allocsiz]; 2311 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { 2312 if ((blk & field) == subfield) 2313 return (bno + pos); 2314 field <<= 1; 2315 subfield <<= 1; 2316 } 2317 } 2318 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt); 2319 panic("ffs_alloccg: block not in map"); 2320 return (-1); 2321 } 2322 2323 /* 2324 * Fserr prints the name of a filesystem with an error diagnostic. 2325 * 2326 * The form of the error message is: 2327 * fs: error message 2328 */ 2329 static void 2330 ffs_fserr(fs, inum, cp) 2331 struct fs *fs; 2332 ino_t inum; 2333 char *cp; 2334 { 2335 struct thread *td = curthread; /* XXX */ 2336 struct proc *p = td->td_proc; 2337 2338 log(LOG_ERR, "pid %d (%s), uid %d inumber %d on %s: %s\n", 2339 p->p_pid, p->p_comm, td->td_ucred->cr_uid, inum, fs->fs_fsmnt, cp); 2340 } 2341 2342 /* 2343 * This function provides the capability for the fsck program to 2344 * update an active filesystem. Fourteen operations are provided: 2345 * 2346 * adjrefcnt(inode, amt) - adjusts the reference count on the 2347 * specified inode by the specified amount. Under normal 2348 * operation the count should always go down. Decrementing 2349 * the count to zero will cause the inode to be freed. 2350 * adjblkcnt(inode, amt) - adjust the number of blocks used by the 2351 * specified inode by the specified amount. 2352 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) - 2353 * adjust the superblock summary. 2354 * freedirs(inode, count) - directory inodes [inode..inode + count - 1] 2355 * are marked as free. Inodes should never have to be marked 2356 * as in use. 2357 * freefiles(inode, count) - file inodes [inode..inode + count - 1] 2358 * are marked as free. Inodes should never have to be marked 2359 * as in use. 2360 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1] 2361 * are marked as free. Blocks should never have to be marked 2362 * as in use. 2363 * setflags(flags, set/clear) - the fs_flags field has the specified 2364 * flags set (second parameter +1) or cleared (second parameter -1).
2365 * setcwd(dirinode) - set the current directory to dirinode in the 2366 * filesystem associated with the snapshot. 2367 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".." 2368 * in the current directory is oldvalue, then change it to newvalue. 2369 * unlink(nameptr, oldvalue) - Verify that the inode number associated 2370 * with nameptr in the current directory is oldvalue, then unlink it. * An illustrative userland invocation of this interface is sketched at * the end of this file. 2371 */ 2372 2373 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS); 2374 2375 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT, 2376 0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count"); 2377 2378 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR, 2379 sysctl_ffs_fsck, "Adjust Inode Used Blocks Count"); 2380 2381 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR, 2382 sysctl_ffs_fsck, "Adjust number of directories"); 2383 2384 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR, 2385 sysctl_ffs_fsck, "Adjust number of free blocks"); 2386 2387 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR, 2388 sysctl_ffs_fsck, "Adjust number of free inodes"); 2389 2390 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR, 2391 sysctl_ffs_fsck, "Adjust number of free frags"); 2392 2393 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR, 2394 sysctl_ffs_fsck, "Adjust number of free clusters"); 2395 2396 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR, 2397 sysctl_ffs_fsck, "Free Range of Directory Inodes"); 2398 2399 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR, 2400 sysctl_ffs_fsck, "Free Range of File Inodes"); 2401 2402 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR, 2403 sysctl_ffs_fsck, "Free Range of Blocks"); 2404 2405 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR, 2406 sysctl_ffs_fsck, "Change Filesystem Flags"); 2407 2408 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR, 2409 sysctl_ffs_fsck, "Set Current Working Directory"); 2410 2411 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR, 2412 sysctl_ffs_fsck, "Change Value of ..
Entry"); 2413 2414 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR, 2415 sysctl_ffs_fsck, "Unlink a Duplicate Name"); 2416 2417 #ifdef DEBUG 2418 static int fsckcmds = 0; 2419 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, ""); 2420 #endif /* DEBUG */ 2421 2422 static int 2423 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS) 2424 { 2425 struct thread *td = curthread; 2426 struct fsck_cmd cmd; 2427 struct ufsmount *ump; 2428 struct vnode *vp, *vpold, *dvp, *fdvp; 2429 struct inode *ip, *dp; 2430 struct mount *mp; 2431 struct fs *fs; 2432 ufs2_daddr_t blkno; 2433 long blkcnt, blksize; 2434 struct filedesc *fdp; 2435 struct file *fp; 2436 int vfslocked, filetype, error; 2437 2438 if (req->newlen > sizeof cmd) 2439 return (EBADRPC); 2440 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0) 2441 return (error); 2442 if (cmd.version != FFS_CMD_VERSION) 2443 return (ERPCMISMATCH); 2444 if ((error = getvnode(curproc->p_fd, cmd.handle, &fp)) != 0) 2445 return (error); 2446 vp = fp->f_data; 2447 if (vp->v_type != VREG && vp->v_type != VDIR) { 2448 fdrop(fp, td); 2449 return (EINVAL); 2450 } 2451 vn_start_write(vp, &mp, V_WAIT); 2452 if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) { 2453 vn_finished_write(mp); 2454 fdrop(fp, td); 2455 return (EINVAL); 2456 } 2457 if (mp->mnt_flag & MNT_RDONLY) { 2458 vn_finished_write(mp); 2459 fdrop(fp, td); 2460 return (EROFS); 2461 } 2462 ump = VFSTOUFS(mp); 2463 fs = ump->um_fs; 2464 filetype = IFREG; 2465 2466 switch (oidp->oid_number) { 2467 2468 case FFS_SET_FLAGS: 2469 #ifdef DEBUG 2470 if (fsckcmds) 2471 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname, 2472 cmd.size > 0 ? "set" : "clear"); 2473 #endif /* DEBUG */ 2474 if (cmd.size > 0) 2475 fs->fs_flags |= (long)cmd.value; 2476 else 2477 fs->fs_flags &= ~(long)cmd.value; 2478 break; 2479 2480 case FFS_ADJ_REFCNT: 2481 #ifdef DEBUG 2482 if (fsckcmds) { 2483 printf("%s: adjust inode %jd count by %jd\n", 2484 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2485 (intmax_t)cmd.size); 2486 } 2487 #endif /* DEBUG */ 2488 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2489 break; 2490 ip = VTOI(vp); 2491 ip->i_nlink += cmd.size; 2492 DIP_SET(ip, i_nlink, ip->i_nlink); 2493 ip->i_effnlink += cmd.size; 2494 ip->i_flag |= IN_CHANGE; 2495 if (DOINGSOFTDEP(vp)) 2496 softdep_change_linkcnt(ip); 2497 vput(vp); 2498 break; 2499 2500 case FFS_ADJ_BLKCNT: 2501 #ifdef DEBUG 2502 if (fsckcmds) { 2503 printf("%s: adjust inode %jd block count by %jd\n", 2504 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2505 (intmax_t)cmd.size); 2506 } 2507 #endif /* DEBUG */ 2508 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2509 break; 2510 ip = VTOI(vp); 2511 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size); 2512 ip->i_flag |= IN_CHANGE; 2513 vput(vp); 2514 break; 2515 2516 case FFS_DIR_FREE: 2517 filetype = IFDIR; 2518 /* fall through */ 2519 2520 case FFS_FILE_FREE: 2521 #ifdef DEBUG 2522 if (fsckcmds) { 2523 if (cmd.size == 1) 2524 printf("%s: free %s inode %d\n", 2525 mp->mnt_stat.f_mntonname, 2526 filetype == IFDIR ? "directory" : "file", 2527 (ino_t)cmd.value); 2528 else 2529 printf("%s: free %s inodes %d-%d\n", 2530 mp->mnt_stat.f_mntonname, 2531 filetype == IFDIR ? 
"directory" : "file", 2532 (ino_t)cmd.value, 2533 (ino_t)(cmd.value + cmd.size - 1)); 2534 } 2535 #endif /* DEBUG */ 2536 while (cmd.size > 0) { 2537 if ((error = ffs_freefile(ump, fs, ump->um_devvp, 2538 cmd.value, filetype, NULL))) 2539 break; 2540 cmd.size -= 1; 2541 cmd.value += 1; 2542 } 2543 break; 2544 2545 case FFS_BLK_FREE: 2546 #ifdef DEBUG 2547 if (fsckcmds) { 2548 if (cmd.size == 1) 2549 printf("%s: free block %jd\n", 2550 mp->mnt_stat.f_mntonname, 2551 (intmax_t)cmd.value); 2552 else 2553 printf("%s: free blocks %jd-%jd\n", 2554 mp->mnt_stat.f_mntonname, 2555 (intmax_t)cmd.value, 2556 (intmax_t)cmd.value + cmd.size - 1); 2557 } 2558 #endif /* DEBUG */ 2559 blkno = cmd.value; 2560 blkcnt = cmd.size; 2561 blksize = fs->fs_frag - (blkno % fs->fs_frag); 2562 while (blkcnt > 0) { 2563 if (blksize > blkcnt) 2564 blksize = blkcnt; 2565 ffs_blkfree(ump, fs, ump->um_devvp, blkno, 2566 blksize * fs->fs_fsize, ROOTINO, NULL); 2567 blkno += blksize; 2568 blkcnt -= blksize; 2569 blksize = fs->fs_frag; 2570 } 2571 break; 2572 2573 /* 2574 * Adjust superblock summaries. fsck(8) is expected to 2575 * submit deltas when necessary. 2576 */ 2577 case FFS_ADJ_NDIR: 2578 #ifdef DEBUG 2579 if (fsckcmds) { 2580 printf("%s: adjust number of directories by %jd\n", 2581 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2582 } 2583 #endif /* DEBUG */ 2584 fs->fs_cstotal.cs_ndir += cmd.value; 2585 break; 2586 2587 case FFS_ADJ_NBFREE: 2588 #ifdef DEBUG 2589 if (fsckcmds) { 2590 printf("%s: adjust number of free blocks by %+jd\n", 2591 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2592 } 2593 #endif /* DEBUG */ 2594 fs->fs_cstotal.cs_nbfree += cmd.value; 2595 break; 2596 2597 case FFS_ADJ_NIFREE: 2598 #ifdef DEBUG 2599 if (fsckcmds) { 2600 printf("%s: adjust number of free inodes by %+jd\n", 2601 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2602 } 2603 #endif /* DEBUG */ 2604 fs->fs_cstotal.cs_nifree += cmd.value; 2605 break; 2606 2607 case FFS_ADJ_NFFREE: 2608 #ifdef DEBUG 2609 if (fsckcmds) { 2610 printf("%s: adjust number of free frags by %+jd\n", 2611 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2612 } 2613 #endif /* DEBUG */ 2614 fs->fs_cstotal.cs_nffree += cmd.value; 2615 break; 2616 2617 case FFS_ADJ_NUMCLUSTERS: 2618 #ifdef DEBUG 2619 if (fsckcmds) { 2620 printf("%s: adjust number of free clusters by %+jd\n", 2621 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2622 } 2623 #endif /* DEBUG */ 2624 fs->fs_cstotal.cs_numclusters += cmd.value; 2625 break; 2626 2627 case FFS_SET_CWD: 2628 #ifdef DEBUG 2629 if (fsckcmds) { 2630 printf("%s: set current directory to inode %jd\n", 2631 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2632 } 2633 #endif /* DEBUG */ 2634 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp))) 2635 break; 2636 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2637 AUDIT_ARG_VNODE1(vp); 2638 if ((error = change_dir(vp, td)) != 0) { 2639 vput(vp); 2640 VFS_UNLOCK_GIANT(vfslocked); 2641 break; 2642 } 2643 VOP_UNLOCK(vp, 0); 2644 VFS_UNLOCK_GIANT(vfslocked); 2645 fdp = td->td_proc->p_fd; 2646 FILEDESC_XLOCK(fdp); 2647 vpold = fdp->fd_cdir; 2648 fdp->fd_cdir = vp; 2649 FILEDESC_XUNLOCK(fdp); 2650 vfslocked = VFS_LOCK_GIANT(vpold->v_mount); 2651 vrele(vpold); 2652 VFS_UNLOCK_GIANT(vfslocked); 2653 break; 2654 2655 case FFS_SET_DOTDOT: 2656 #ifdef DEBUG 2657 if (fsckcmds) { 2658 printf("%s: change .. 
in cwd from %jd to %jd\n", 2659 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2660 (intmax_t)cmd.size); 2661 } 2662 #endif /* DEBUG */ 2663 /* 2664 * First we have to get and lock the parent directory 2665 * to which ".." points. 2666 */ 2667 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp); 2668 if (error) 2669 break; 2670 /* 2671 * Now we get and lock the child directory containing "..". 2672 */ 2673 FILEDESC_SLOCK(td->td_proc->p_fd); 2674 dvp = td->td_proc->p_fd->fd_cdir; 2675 FILEDESC_SUNLOCK(td->td_proc->p_fd); 2676 if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) { 2677 vput(fdvp); 2678 break; 2679 } 2680 dp = VTOI(dvp); 2681 dp->i_offset = 12; /* XXX mastertemplate.dot_reclen */ 2682 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size, 2683 DT_DIR, 0); 2684 cache_purge(fdvp); 2685 cache_purge(dvp); 2686 vput(dvp); 2687 vput(fdvp); 2688 break; 2689 2690 case FFS_UNLINK: 2691 #ifdef DEBUG 2692 if (fsckcmds) { 2693 char buf[32]; 2694 2695 if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL)) 2696 strncpy(buf, "Name_too_long", 32); 2697 printf("%s: unlink %s (inode %jd)\n", 2698 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size); 2699 } 2700 #endif /* DEBUG */ 2701 /* 2702 * kern_unlinkat will do its own start/finish writes and 2703 * they do not nest, so drop ours here. Setting mp == NULL 2704 * indicates that vn_finished_write is not needed down below. 2705 */ 2706 vn_finished_write(mp); 2707 mp = NULL; 2708 error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value, 2709 UIO_USERSPACE, (ino_t)cmd.size); 2710 break; 2711 2712 default: 2713 #ifdef DEBUG 2714 if (fsckcmds) { 2715 printf("Invalid request %d from fsck\n", 2716 oidp->oid_number); 2717 } 2718 #endif /* DEBUG */ 2719 error = EINVAL; 2720 break; 2721 2722 } 2723 fdrop(fp, td); 2724 vn_finished_write(mp); 2725 return (error); 2726 } 2727
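
/*
 * Illustrative sketch (not compiled as part of this file): how the fsck
 * program, or a similar userland tool, might drive the sysctl interface
 * implemented by sysctl_ffs_fsck() above.  The struct fsck_cmd fields
 * (version, handle, value, size) and FFS_CMD_VERSION follow their use in
 * the handler; the header providing struct fsck_cmd, the exact field
 * types, and the sysctlbyname() invocation are assumptions that should be
 * checked against the real userland sources.
 */
#if 0	/* userland example only; never built into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <ufs/ffs/fs.h>		/* assumed home of struct fsck_cmd */

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Adjust the link count of inode `ino` by `amt` on the filesystem that
 * contains `path`.  The open descriptor identifies the mount (cmd.handle),
 * while cmd.value and cmd.size carry the inode number and the signed
 * adjustment, matching the FFS_ADJ_REFCNT case of sysctl_ffs_fsck().
 */
static int
adjust_refcnt(const char *path, ino_t ino, int64_t amt)
{
	struct fsck_cmd cmd;
	int fd, error;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (-1);
	memset(&cmd, 0, sizeof(cmd));
	cmd.version = FFS_CMD_VERSION;
	cmd.handle = fd;	/* any file/dir on the target filesystem */
	cmd.value = ino;	/* inode whose count is adjusted */
	cmd.size = amt;		/* amount to add (normally negative) */
	error = sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL,
	    &cmd, sizeof(cmd));
	if (error < 0)
		perror("vfs.ffs.adjrefcnt");
	close(fd);
	return (error);
}
#endif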