/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error, gbflags;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	*bpp = 0;
	vp = ITOV(ip);
	fs = ip->i_fs;
	bp = NULL;
	ump = ip->i_ump;
	gbflags = (flags & BA_UNMAPPED) != 0 ?
GB_UNMAPPED : 0; 268 269 mtx_assert(UFS_MTX(ump), MA_OWNED); 270 #ifdef INVARIANTS 271 if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) 272 panic("ffs_realloccg: allocation on suspended filesystem"); 273 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 || 274 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) { 275 printf( 276 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n", 277 devtoname(ip->i_dev), (long)fs->fs_bsize, osize, 278 nsize, fs->fs_fsmnt); 279 panic("ffs_realloccg: bad size"); 280 } 281 if (cred == NOCRED) 282 panic("ffs_realloccg: missing credential"); 283 #endif /* INVARIANTS */ 284 reclaimed = 0; 285 retry: 286 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) && 287 freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) { 288 goto nospace; 289 } 290 if (bprev == 0) { 291 printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n", 292 devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev, 293 fs->fs_fsmnt); 294 panic("ffs_realloccg: bad bprev"); 295 } 296 UFS_UNLOCK(ump); 297 /* 298 * Allocate the extra space in the buffer. 299 */ 300 error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp); 301 if (error) { 302 brelse(bp); 303 return (error); 304 } 305 306 if (bp->b_blkno == bp->b_lblkno) { 307 if (lbprev >= NDADDR) 308 panic("ffs_realloccg: lbprev out of range"); 309 bp->b_blkno = fsbtodb(fs, bprev); 310 } 311 312 #ifdef QUOTA 313 error = chkdq(ip, btodb(nsize - osize), cred, 0); 314 if (error) { 315 brelse(bp); 316 return (error); 317 } 318 #endif 319 /* 320 * Check for extension in the existing location. 321 */ 322 cg = dtog(fs, bprev); 323 UFS_LOCK(ump); 324 bno = ffs_fragextend(ip, cg, bprev, osize, nsize); 325 if (bno) { 326 if (bp->b_blkno != fsbtodb(fs, bno)) 327 panic("ffs_realloccg: bad blockno"); 328 delta = btodb(nsize - osize); 329 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta); 330 if (flags & IO_EXT) 331 ip->i_flag |= IN_CHANGE; 332 else 333 ip->i_flag |= IN_CHANGE | IN_UPDATE; 334 allocbuf(bp, nsize); 335 bp->b_flags |= B_DONE; 336 vfs_bio_bzero_buf(bp, osize, nsize - osize); 337 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO) 338 vfs_bio_set_valid(bp, osize, nsize - osize); 339 *bpp = bp; 340 return (0); 341 } 342 /* 343 * Allocate a new disk location. 344 */ 345 if (bpref >= fs->fs_size) 346 bpref = 0; 347 switch ((int)fs->fs_optim) { 348 case FS_OPTSPACE: 349 /* 350 * Allocate an exact sized fragment. Although this makes 351 * best use of space, we will waste time relocating it if 352 * the file continues to grow. If the fragmentation is 353 * less than half of the minimum free reserve, we choose 354 * to begin optimizing for time. 355 */ 356 request = nsize; 357 if (fs->fs_minfree <= 5 || 358 fs->fs_cstotal.cs_nffree > 359 (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100)) 360 break; 361 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n", 362 fs->fs_fsmnt); 363 fs->fs_optim = FS_OPTTIME; 364 break; 365 case FS_OPTTIME: 366 /* 367 * At this point we have discovered a file that is trying to 368 * grow a small fragment to a larger fragment. To save time, 369 * we allocate a full sized block, then free the unused portion. 370 * If the file continues to grow, the `ffs_fragextend' call 371 * above will be able to grow it in place without further 372 * copying. If aberrant programs cause disk fragmentation to 373 * grow within 2% of the free reserve, we choose to begin 374 * optimizing for space. 
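/*
 * Illustrative user-space sketch (not part of the FFS code): a simplified
 * model of the minfree reserve check that ffs_alloc() and ffs_realloccg()
 * apply to unprivileged callers.  The struct and helper below are stand-ins
 * for the superblock fields and the freespace()/numfrags() macros; the
 * arithmetic assumes freespace() reports free fragments above the reserve.
 */
#if 0
#include <stdio.h>

struct fsmodel {
	long long dsize;	/* data fragments in the filesystem */
	long long nbfree;	/* free full blocks */
	long long nffree;	/* free fragments not in full blocks */
	int	  frag;		/* fragments per block */
	int	  fsize;	/* fragment size in bytes */
	int	  minfree;	/* reserve, percent of dsize */
};

/* Free fragments above the minfree reserve (may go negative). */
static long long
model_freespace(const struct fsmodel *fs)
{
	return (fs->nbfree * fs->frag + fs->nffree -
	    fs->dsize * fs->minfree / 100);
}

int
main(void)
{
	struct fsmodel fs = { 1000000, 30000, 5000, 8, 4096, 8 };
	int size = 32768;			/* requested bytes */
	long long need = size / fs.fsize;	/* numfrags() analogue */

	if (model_freespace(&fs) - need < 0)
		printf("unprivileged write would get ENOSPC\n");
	else
		printf("request fits above the %d%% reserve\n", fs.minfree);
	return (0);
}
#endif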
375 */ 376 request = fs->fs_bsize; 377 if (fs->fs_cstotal.cs_nffree < 378 (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100) 379 break; 380 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n", 381 fs->fs_fsmnt); 382 fs->fs_optim = FS_OPTSPACE; 383 break; 384 default: 385 printf("dev = %s, optim = %ld, fs = %s\n", 386 devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt); 387 panic("ffs_realloccg: bad optim"); 388 /* NOTREACHED */ 389 } 390 bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg); 391 if (bno > 0) { 392 bp->b_blkno = fsbtodb(fs, bno); 393 if (!DOINGSOFTDEP(vp)) 394 ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize, 395 ip->i_number, vp->v_type, NULL); 396 delta = btodb(nsize - osize); 397 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta); 398 if (flags & IO_EXT) 399 ip->i_flag |= IN_CHANGE; 400 else 401 ip->i_flag |= IN_CHANGE | IN_UPDATE; 402 allocbuf(bp, nsize); 403 bp->b_flags |= B_DONE; 404 vfs_bio_bzero_buf(bp, osize, nsize - osize); 405 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO) 406 vfs_bio_set_valid(bp, osize, nsize - osize); 407 *bpp = bp; 408 return (0); 409 } 410 #ifdef QUOTA 411 UFS_UNLOCK(ump); 412 /* 413 * Restore user's disk quota because allocation failed. 414 */ 415 (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE); 416 UFS_LOCK(ump); 417 #endif 418 nospace: 419 /* 420 * no space available 421 */ 422 if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) { 423 reclaimed = 1; 424 UFS_UNLOCK(ump); 425 if (bp) { 426 brelse(bp); 427 bp = NULL; 428 } 429 UFS_LOCK(ump); 430 softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT); 431 goto retry; 432 } 433 UFS_UNLOCK(ump); 434 if (bp) 435 brelse(bp); 436 if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) { 437 ffs_fserr(fs, ip->i_number, "filesystem full"); 438 uprintf("\n%s: write failed, filesystem is full\n", 439 fs->fs_fsmnt); 440 } 441 return (ENOSPC); 442 } 443 444 /* 445 * Reallocate a sequence of blocks into a contiguous sequence of blocks. 446 * 447 * The vnode and an array of buffer pointers for a range of sequential 448 * logical blocks to be made contiguous is given. The allocator attempts 449 * to find a range of sequential blocks starting as close as possible 450 * from the end of the allocation for the logical block immediately 451 * preceding the current range. If successful, the physical block numbers 452 * in the buffer pointers and in the inode are changed to reflect the new 453 * allocation. If unsuccessful, the allocation is left unchanged. The 454 * success in doing the reallocation is returned. Note that the error 455 * return is not reflected back to the user. Rather the previous block 456 * allocation will be used. 
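/*
 * Illustrative user-space sketch (not part of the FFS code): the two
 * hysteresis thresholds used by the FS_OPTSPACE/FS_OPTTIME switch in
 * ffs_realloccg() above, evaluated for an example filesystem.  The names
 * mirror fs_dsize, fs_minfree and fs_cstotal.cs_nffree, but the values are
 * made up.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long long dsize = 10000000;	/* data fragments */
	int minfree = 8;		/* percent reserve */

	/* SPACE -> TIME once fragmentation drops below half the reserve. */
	long long to_time = dsize * minfree / (2 * 100);
	/* TIME -> SPACE once fragmentation grows to (minfree - 2)% of dsize. */
	long long to_space = dsize * (minfree - 2) / 100;

	printf("switch to FS_OPTTIME when cs_nffree <= %lld frags\n", to_time);
	printf("switch back to FS_OPTSPACE when cs_nffree >= %lld frags\n",
	    to_space);
	return (0);
}
#endif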
457 */ 458 459 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem"); 460 461 static int doasyncfree = 1; 462 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, 463 "do not force synchronous writes when blocks are reallocated"); 464 465 static int doreallocblks = 1; 466 SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, 467 "enable block reallocation"); 468 469 static int maxclustersearch = 10; 470 SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch, 471 0, "max number of cylinder group to search for contigous blocks"); 472 473 #ifdef DEBUG 474 static volatile int prtrealloc = 0; 475 #endif 476 477 int 478 ffs_reallocblks(ap) 479 struct vop_reallocblks_args /* { 480 struct vnode *a_vp; 481 struct cluster_save *a_buflist; 482 } */ *ap; 483 { 484 485 if (doreallocblks == 0) 486 return (ENOSPC); 487 /* 488 * We can't wait in softdep prealloc as it may fsync and recurse 489 * here. Instead we simply fail to reallocate blocks if this 490 * rare condition arises. 491 */ 492 if (DOINGSOFTDEP(ap->a_vp)) 493 if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0) 494 return (ENOSPC); 495 if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1) 496 return (ffs_reallocblks_ufs1(ap)); 497 return (ffs_reallocblks_ufs2(ap)); 498 } 499 500 static int 501 ffs_reallocblks_ufs1(ap) 502 struct vop_reallocblks_args /* { 503 struct vnode *a_vp; 504 struct cluster_save *a_buflist; 505 } */ *ap; 506 { 507 struct fs *fs; 508 struct inode *ip; 509 struct vnode *vp; 510 struct buf *sbp, *ebp; 511 ufs1_daddr_t *bap, *sbap, *ebap = 0; 512 struct cluster_save *buflist; 513 struct ufsmount *ump; 514 ufs_lbn_t start_lbn, end_lbn; 515 ufs1_daddr_t soff, newblk, blkno; 516 ufs2_daddr_t pref; 517 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp; 518 int i, cg, len, start_lvl, end_lvl, ssize; 519 520 vp = ap->a_vp; 521 ip = VTOI(vp); 522 fs = ip->i_fs; 523 ump = ip->i_ump; 524 /* 525 * If we are not tracking block clusters or if we have less than 4% 526 * free blocks left, then do not attempt to cluster. Running with 527 * less than 5% free block reserve is not recommended and those that 528 * choose to do so do not expect to have good file layout. 529 */ 530 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0) 531 return (ENOSPC); 532 buflist = ap->a_buflist; 533 len = buflist->bs_nchildren; 534 start_lbn = buflist->bs_children[0]->b_lblkno; 535 end_lbn = start_lbn + len - 1; 536 #ifdef INVARIANTS 537 for (i = 0; i < len; i++) 538 if (!ffs_checkblk(ip, 539 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 540 panic("ffs_reallocblks: unallocated block 1"); 541 for (i = 1; i < len; i++) 542 if (buflist->bs_children[i]->b_lblkno != start_lbn + i) 543 panic("ffs_reallocblks: non-logical cluster"); 544 blkno = buflist->bs_children[0]->b_blkno; 545 ssize = fsbtodb(fs, fs->fs_frag); 546 for (i = 1; i < len - 1; i++) 547 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize)) 548 panic("ffs_reallocblks: non-physical cluster %d", i); 549 #endif 550 /* 551 * If the cluster crosses the boundary for the first indirect 552 * block, leave space for the indirect block. Indirect blocks 553 * are initially laid out in a position after the last direct 554 * block. Block reallocation would usually destroy locality by 555 * moving the indirect block out of the way to make room for 556 * data blocks if we didn't compensate here. We should also do 557 * this for other indirect block boundaries, but it is only 558 * important for the first one. 
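/*
 * Illustrative sketch (hypothetical, not kernel code): the contiguity
 * invariants checked under INVARIANTS in ffs_reallocblks_ufs1() above.
 * A cluster of len buffers is logically contiguous when b_lblkno increases
 * by one per buffer, and physically contiguous when b_blkno increases by
 * one full block per buffer (fs_frag fragments expressed in sectors, the
 * fsbtodb(fs, fs_frag) value).  The struct below is a stand-in for struct buf.
 */
#if 0
#include <stdbool.h>

struct bufmodel {
	long	lblkno;		/* logical block number */
	long	blkno;		/* disk address, in sectors */
};

static bool
cluster_is_contiguous(const struct bufmodel *b, int len, long secs_per_blk)
{
	int i;

	for (i = 1; i < len; i++)
		if (b[i].lblkno != b[0].lblkno + i)
			return (false);		/* non-logical cluster */
	for (i = 1; i < len - 1; i++)		/* kernel skips the last buf */
		if (b[i].blkno != b[0].blkno + i * secs_per_blk)
			return (false);		/* non-physical cluster */
	return (true);
}
#endif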
559 */ 560 if (start_lbn < NDADDR && end_lbn >= NDADDR) 561 return (ENOSPC); 562 /* 563 * If the latest allocation is in a new cylinder group, assume that 564 * the filesystem has decided to move and do not force it back to 565 * the previous cylinder group. 566 */ 567 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != 568 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) 569 return (ENOSPC); 570 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) || 571 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl)) 572 return (ENOSPC); 573 /* 574 * Get the starting offset and block map for the first block. 575 */ 576 if (start_lvl == 0) { 577 sbap = &ip->i_din1->di_db[0]; 578 soff = start_lbn; 579 } else { 580 idp = &start_ap[start_lvl - 1]; 581 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) { 582 brelse(sbp); 583 return (ENOSPC); 584 } 585 sbap = (ufs1_daddr_t *)sbp->b_data; 586 soff = idp->in_off; 587 } 588 /* 589 * If the block range spans two block maps, get the second map. 590 */ 591 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { 592 ssize = len; 593 } else { 594 #ifdef INVARIANTS 595 if (start_lvl > 0 && 596 start_ap[start_lvl - 1].in_lbn == idp->in_lbn) 597 panic("ffs_reallocblk: start == end"); 598 #endif 599 ssize = len - (idp->in_off + 1); 600 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp)) 601 goto fail; 602 ebap = (ufs1_daddr_t *)ebp->b_data; 603 } 604 /* 605 * Find the preferred location for the cluster. If we have not 606 * previously failed at this endeavor, then follow our standard 607 * preference calculation. If we have failed at it, then pick up 608 * where we last ended our search. 609 */ 610 UFS_LOCK(ump); 611 if (ip->i_nextclustercg == -1) 612 pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap); 613 else 614 pref = cgdata(fs, ip->i_nextclustercg); 615 /* 616 * Search the block map looking for an allocation of the desired size. 617 * To avoid wasting too much time, we limit the number of cylinder 618 * groups that we will search. 619 */ 620 cg = dtog(fs, pref); 621 for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) { 622 if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0) 623 break; 624 cg += 1; 625 if (cg >= fs->fs_ncg) 626 cg = 0; 627 } 628 /* 629 * If we have failed in our search, record where we gave up for 630 * next time. Otherwise, fall back to our usual search citerion. 631 */ 632 if (newblk == 0) { 633 ip->i_nextclustercg = cg; 634 UFS_UNLOCK(ump); 635 goto fail; 636 } 637 ip->i_nextclustercg = -1; 638 /* 639 * We have found a new contiguous block. 640 * 641 * First we have to replace the old block pointers with the new 642 * block pointers in the inode and indirect blocks associated 643 * with the file. 
644 */ 645 #ifdef DEBUG 646 if (prtrealloc) 647 printf("realloc: ino %ju, lbns %jd-%jd\n\told:", 648 (uintmax_t)ip->i_number, 649 (intmax_t)start_lbn, (intmax_t)end_lbn); 650 #endif 651 blkno = newblk; 652 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) { 653 if (i == ssize) { 654 bap = ebap; 655 soff = -i; 656 } 657 #ifdef INVARIANTS 658 if (!ffs_checkblk(ip, 659 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 660 panic("ffs_reallocblks: unallocated block 2"); 661 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap) 662 panic("ffs_reallocblks: alloc mismatch"); 663 #endif 664 #ifdef DEBUG 665 if (prtrealloc) 666 printf(" %d,", *bap); 667 #endif 668 if (DOINGSOFTDEP(vp)) { 669 if (sbap == &ip->i_din1->di_db[0] && i < ssize) 670 softdep_setup_allocdirect(ip, start_lbn + i, 671 blkno, *bap, fs->fs_bsize, fs->fs_bsize, 672 buflist->bs_children[i]); 673 else 674 softdep_setup_allocindir_page(ip, start_lbn + i, 675 i < ssize ? sbp : ebp, soff + i, blkno, 676 *bap, buflist->bs_children[i]); 677 } 678 *bap++ = blkno; 679 } 680 /* 681 * Next we must write out the modified inode and indirect blocks. 682 * For strict correctness, the writes should be synchronous since 683 * the old block values may have been written to disk. In practise 684 * they are almost never written, but if we are concerned about 685 * strict correctness, the `doasyncfree' flag should be set to zero. 686 * 687 * The test on `doasyncfree' should be changed to test a flag 688 * that shows whether the associated buffers and inodes have 689 * been written. The flag should be set when the cluster is 690 * started and cleared whenever the buffer or inode is flushed. 691 * We can then check below to see if it is set, and do the 692 * synchronous write only when it has been cleared. 693 */ 694 if (sbap != &ip->i_din1->di_db[0]) { 695 if (doasyncfree) 696 bdwrite(sbp); 697 else 698 bwrite(sbp); 699 } else { 700 ip->i_flag |= IN_CHANGE | IN_UPDATE; 701 if (!doasyncfree) 702 ffs_update(vp, 1); 703 } 704 if (ssize < len) { 705 if (doasyncfree) 706 bdwrite(ebp); 707 else 708 bwrite(ebp); 709 } 710 /* 711 * Last, free the old blocks and assign the new blocks to the buffers. 
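/*
 * Illustrative sketch (hypothetical): the index trick used in the
 * pointer-rewrite loop above.  The first `ssize' cluster entries live in
 * the first block map at offsets soff .. soff + ssize - 1; once i reaches
 * ssize the loop switches to the second map and resets soff to -i, so that
 * soff + i restarts at offset 0 within that map.
 */
#if 0
static int
map_offset(int i, int ssize, int soff_first)
{
	/* Offset of cluster entry i within whichever map holds it. */
	return (i < ssize ? soff_first + i : i - ssize);
}
#endif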
712 */ 713 #ifdef DEBUG 714 if (prtrealloc) 715 printf("\n\tnew:"); 716 #endif 717 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) { 718 if (!DOINGSOFTDEP(vp)) 719 ffs_blkfree(ump, fs, ip->i_devvp, 720 dbtofsb(fs, buflist->bs_children[i]->b_blkno), 721 fs->fs_bsize, ip->i_number, vp->v_type, NULL); 722 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno); 723 #ifdef INVARIANTS 724 if (!ffs_checkblk(ip, 725 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 726 panic("ffs_reallocblks: unallocated block 3"); 727 #endif 728 #ifdef DEBUG 729 if (prtrealloc) 730 printf(" %d,", blkno); 731 #endif 732 } 733 #ifdef DEBUG 734 if (prtrealloc) { 735 prtrealloc--; 736 printf("\n"); 737 } 738 #endif 739 return (0); 740 741 fail: 742 if (ssize < len) 743 brelse(ebp); 744 if (sbap != &ip->i_din1->di_db[0]) 745 brelse(sbp); 746 return (ENOSPC); 747 } 748 749 static int 750 ffs_reallocblks_ufs2(ap) 751 struct vop_reallocblks_args /* { 752 struct vnode *a_vp; 753 struct cluster_save *a_buflist; 754 } */ *ap; 755 { 756 struct fs *fs; 757 struct inode *ip; 758 struct vnode *vp; 759 struct buf *sbp, *ebp; 760 ufs2_daddr_t *bap, *sbap, *ebap = 0; 761 struct cluster_save *buflist; 762 struct ufsmount *ump; 763 ufs_lbn_t start_lbn, end_lbn; 764 ufs2_daddr_t soff, newblk, blkno, pref; 765 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp; 766 int i, cg, len, start_lvl, end_lvl, ssize; 767 768 vp = ap->a_vp; 769 ip = VTOI(vp); 770 fs = ip->i_fs; 771 ump = ip->i_ump; 772 /* 773 * If we are not tracking block clusters or if we have less than 4% 774 * free blocks left, then do not attempt to cluster. Running with 775 * less than 5% free block reserve is not recommended and those that 776 * choose to do so do not expect to have good file layout. 777 */ 778 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0) 779 return (ENOSPC); 780 buflist = ap->a_buflist; 781 len = buflist->bs_nchildren; 782 start_lbn = buflist->bs_children[0]->b_lblkno; 783 end_lbn = start_lbn + len - 1; 784 #ifdef INVARIANTS 785 for (i = 0; i < len; i++) 786 if (!ffs_checkblk(ip, 787 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 788 panic("ffs_reallocblks: unallocated block 1"); 789 for (i = 1; i < len; i++) 790 if (buflist->bs_children[i]->b_lblkno != start_lbn + i) 791 panic("ffs_reallocblks: non-logical cluster"); 792 blkno = buflist->bs_children[0]->b_blkno; 793 ssize = fsbtodb(fs, fs->fs_frag); 794 for (i = 1; i < len - 1; i++) 795 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize)) 796 panic("ffs_reallocblks: non-physical cluster %d", i); 797 #endif 798 /* 799 * If the cluster crosses the boundary for the first indirect 800 * block, do not move anything in it. Indirect blocks are 801 * usually initially laid out in a position between the data 802 * blocks. Block reallocation would usually destroy locality by 803 * moving the indirect block out of the way to make room for 804 * data blocks if we didn't compensate here. We should also do 805 * this for other indirect block boundaries, but it is only 806 * important for the first one. 807 */ 808 if (start_lbn < NDADDR && end_lbn >= NDADDR) 809 return (ENOSPC); 810 /* 811 * If the latest allocation is in a new cylinder group, assume that 812 * the filesystem has decided to move and do not force it back to 813 * the previous cylinder group. 
814 */ 815 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != 816 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) 817 return (ENOSPC); 818 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) || 819 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl)) 820 return (ENOSPC); 821 /* 822 * Get the starting offset and block map for the first block. 823 */ 824 if (start_lvl == 0) { 825 sbap = &ip->i_din2->di_db[0]; 826 soff = start_lbn; 827 } else { 828 idp = &start_ap[start_lvl - 1]; 829 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) { 830 brelse(sbp); 831 return (ENOSPC); 832 } 833 sbap = (ufs2_daddr_t *)sbp->b_data; 834 soff = idp->in_off; 835 } 836 /* 837 * If the block range spans two block maps, get the second map. 838 */ 839 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { 840 ssize = len; 841 } else { 842 #ifdef INVARIANTS 843 if (start_lvl > 0 && 844 start_ap[start_lvl - 1].in_lbn == idp->in_lbn) 845 panic("ffs_reallocblk: start == end"); 846 #endif 847 ssize = len - (idp->in_off + 1); 848 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp)) 849 goto fail; 850 ebap = (ufs2_daddr_t *)ebp->b_data; 851 } 852 /* 853 * Find the preferred location for the cluster. If we have not 854 * previously failed at this endeavor, then follow our standard 855 * preference calculation. If we have failed at it, then pick up 856 * where we last ended our search. 857 */ 858 UFS_LOCK(ump); 859 if (ip->i_nextclustercg == -1) 860 pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap); 861 else 862 pref = cgdata(fs, ip->i_nextclustercg); 863 /* 864 * Search the block map looking for an allocation of the desired size. 865 * To avoid wasting too much time, we limit the number of cylinder 866 * groups that we will search. 867 */ 868 cg = dtog(fs, pref); 869 for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) { 870 if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0) 871 break; 872 cg += 1; 873 if (cg >= fs->fs_ncg) 874 cg = 0; 875 } 876 /* 877 * If we have failed in our search, record where we gave up for 878 * next time. Otherwise, fall back to our usual search citerion. 879 */ 880 if (newblk == 0) { 881 ip->i_nextclustercg = cg; 882 UFS_UNLOCK(ump); 883 goto fail; 884 } 885 ip->i_nextclustercg = -1; 886 /* 887 * We have found a new contiguous block. 888 * 889 * First we have to replace the old block pointers with the new 890 * block pointers in the inode and indirect blocks associated 891 * with the file. 892 */ 893 #ifdef DEBUG 894 if (prtrealloc) 895 printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number, 896 (intmax_t)start_lbn, (intmax_t)end_lbn); 897 #endif 898 blkno = newblk; 899 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) { 900 if (i == ssize) { 901 bap = ebap; 902 soff = -i; 903 } 904 #ifdef INVARIANTS 905 if (!ffs_checkblk(ip, 906 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 907 panic("ffs_reallocblks: unallocated block 2"); 908 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap) 909 panic("ffs_reallocblks: alloc mismatch"); 910 #endif 911 #ifdef DEBUG 912 if (prtrealloc) 913 printf(" %jd,", (intmax_t)*bap); 914 #endif 915 if (DOINGSOFTDEP(vp)) { 916 if (sbap == &ip->i_din2->di_db[0] && i < ssize) 917 softdep_setup_allocdirect(ip, start_lbn + i, 918 blkno, *bap, fs->fs_bsize, fs->fs_bsize, 919 buflist->bs_children[i]); 920 else 921 softdep_setup_allocindir_page(ip, start_lbn + i, 922 i < ssize ? 
sbp : ebp, soff + i, blkno, 923 *bap, buflist->bs_children[i]); 924 } 925 *bap++ = blkno; 926 } 927 /* 928 * Next we must write out the modified inode and indirect blocks. 929 * For strict correctness, the writes should be synchronous since 930 * the old block values may have been written to disk. In practise 931 * they are almost never written, but if we are concerned about 932 * strict correctness, the `doasyncfree' flag should be set to zero. 933 * 934 * The test on `doasyncfree' should be changed to test a flag 935 * that shows whether the associated buffers and inodes have 936 * been written. The flag should be set when the cluster is 937 * started and cleared whenever the buffer or inode is flushed. 938 * We can then check below to see if it is set, and do the 939 * synchronous write only when it has been cleared. 940 */ 941 if (sbap != &ip->i_din2->di_db[0]) { 942 if (doasyncfree) 943 bdwrite(sbp); 944 else 945 bwrite(sbp); 946 } else { 947 ip->i_flag |= IN_CHANGE | IN_UPDATE; 948 if (!doasyncfree) 949 ffs_update(vp, 1); 950 } 951 if (ssize < len) { 952 if (doasyncfree) 953 bdwrite(ebp); 954 else 955 bwrite(ebp); 956 } 957 /* 958 * Last, free the old blocks and assign the new blocks to the buffers. 959 */ 960 #ifdef DEBUG 961 if (prtrealloc) 962 printf("\n\tnew:"); 963 #endif 964 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) { 965 if (!DOINGSOFTDEP(vp)) 966 ffs_blkfree(ump, fs, ip->i_devvp, 967 dbtofsb(fs, buflist->bs_children[i]->b_blkno), 968 fs->fs_bsize, ip->i_number, vp->v_type, NULL); 969 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno); 970 #ifdef INVARIANTS 971 if (!ffs_checkblk(ip, 972 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 973 panic("ffs_reallocblks: unallocated block 3"); 974 #endif 975 #ifdef DEBUG 976 if (prtrealloc) 977 printf(" %jd,", (intmax_t)blkno); 978 #endif 979 } 980 #ifdef DEBUG 981 if (prtrealloc) { 982 prtrealloc--; 983 printf("\n"); 984 } 985 #endif 986 return (0); 987 988 fail: 989 if (ssize < len) 990 brelse(ebp); 991 if (sbap != &ip->i_din2->di_db[0]) 992 brelse(sbp); 993 return (ENOSPC); 994 } 995 996 /* 997 * Allocate an inode in the filesystem. 998 * 999 * If allocating a directory, use ffs_dirpref to select the inode. 1000 * If allocating in a directory, the following hierarchy is followed: 1001 * 1) allocate the preferred inode. 1002 * 2) allocate an inode in the same cylinder group. 1003 * 3) quadradically rehash into other cylinder groups, until an 1004 * available inode is located. 1005 * If no inode preference is given the following hierarchy is used 1006 * to allocate an inode: 1007 * 1) allocate an inode in cylinder group 0. 1008 * 2) quadradically rehash into other cylinder groups, until an 1009 * available inode is located. 
1010 */ 1011 int 1012 ffs_valloc(pvp, mode, cred, vpp) 1013 struct vnode *pvp; 1014 int mode; 1015 struct ucred *cred; 1016 struct vnode **vpp; 1017 { 1018 struct inode *pip; 1019 struct fs *fs; 1020 struct inode *ip; 1021 struct timespec ts; 1022 struct ufsmount *ump; 1023 ino_t ino, ipref; 1024 u_int cg; 1025 int error, error1, reclaimed; 1026 static struct timeval lastfail; 1027 static int curfail; 1028 1029 *vpp = NULL; 1030 pip = VTOI(pvp); 1031 fs = pip->i_fs; 1032 ump = pip->i_ump; 1033 1034 UFS_LOCK(ump); 1035 reclaimed = 0; 1036 retry: 1037 if (fs->fs_cstotal.cs_nifree == 0) 1038 goto noinodes; 1039 1040 if ((mode & IFMT) == IFDIR) 1041 ipref = ffs_dirpref(pip); 1042 else 1043 ipref = pip->i_number; 1044 if (ipref >= fs->fs_ncg * fs->fs_ipg) 1045 ipref = 0; 1046 cg = ino_to_cg(fs, ipref); 1047 /* 1048 * Track number of dirs created one after another 1049 * in a same cg without intervening by files. 1050 */ 1051 if ((mode & IFMT) == IFDIR) { 1052 if (fs->fs_contigdirs[cg] < 255) 1053 fs->fs_contigdirs[cg]++; 1054 } else { 1055 if (fs->fs_contigdirs[cg] > 0) 1056 fs->fs_contigdirs[cg]--; 1057 } 1058 ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0, 1059 (allocfcn_t *)ffs_nodealloccg); 1060 if (ino == 0) 1061 goto noinodes; 1062 error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp); 1063 if (error) { 1064 error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp, 1065 FFSV_FORCEINSMQ); 1066 ffs_vfree(pvp, ino, mode); 1067 if (error1 == 0) { 1068 ip = VTOI(*vpp); 1069 if (ip->i_mode) 1070 goto dup_alloc; 1071 ip->i_flag |= IN_MODIFIED; 1072 vput(*vpp); 1073 } 1074 return (error); 1075 } 1076 ip = VTOI(*vpp); 1077 if (ip->i_mode) { 1078 dup_alloc: 1079 printf("mode = 0%o, inum = %ju, fs = %s\n", 1080 ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt); 1081 panic("ffs_valloc: dup alloc"); 1082 } 1083 if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) { /* XXX */ 1084 printf("free inode %s/%lu had %ld blocks\n", 1085 fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks)); 1086 DIP_SET(ip, i_blocks, 0); 1087 } 1088 ip->i_flags = 0; 1089 DIP_SET(ip, i_flags, 0); 1090 /* 1091 * Set up a new generation number for this inode. 1092 */ 1093 if (ip->i_gen == 0 || ++ip->i_gen == 0) 1094 ip->i_gen = arc4random() / 2 + 1; 1095 DIP_SET(ip, i_gen, ip->i_gen); 1096 if (fs->fs_magic == FS_UFS2_MAGIC) { 1097 vfs_timestamp(&ts); 1098 ip->i_din2->di_birthtime = ts.tv_sec; 1099 ip->i_din2->di_birthnsec = ts.tv_nsec; 1100 } 1101 ufs_prepare_reclaim(*vpp); 1102 ip->i_flag = 0; 1103 (*vpp)->v_vflag = 0; 1104 (*vpp)->v_type = VNON; 1105 if (fs->fs_magic == FS_UFS2_MAGIC) 1106 (*vpp)->v_op = &ffs_vnodeops2; 1107 else 1108 (*vpp)->v_op = &ffs_vnodeops1; 1109 return (0); 1110 noinodes: 1111 if (reclaimed == 0) { 1112 reclaimed = 1; 1113 softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT); 1114 goto retry; 1115 } 1116 UFS_UNLOCK(ump); 1117 if (ppsratecheck(&lastfail, &curfail, 1)) { 1118 ffs_fserr(fs, pip->i_number, "out of inodes"); 1119 uprintf("\n%s: create/symlink failed, no inodes free\n", 1120 fs->fs_fsmnt); 1121 } 1122 return (ENOSPC); 1123 } 1124 1125 /* 1126 * Find a cylinder group to place a directory. 1127 * 1128 * The policy implemented by this algorithm is to allocate a 1129 * directory inode in the same cylinder group as its parent 1130 * directory, but also to reserve space for its files inodes 1131 * and data. Restrict the number of directories which may be 1132 * allocated one after another in the same cylinder group 1133 * without intervening allocation of files. 
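/*
 * Illustrative sketch (hypothetical): the per-cylinder-group saturating
 * counter updated in ffs_valloc() above.  Directory creations bump
 * fs_contigdirs[cg] toward 255 and file creations decay it toward 0;
 * ffs_dirpref() later compares the value against maxcontigdirs to decide
 * when a group already holds enough back-to-back directories.
 */
#if 0
#include <stdint.h>

static void
contigdirs_update(uint8_t *contigdirs, unsigned cg, int creating_dir)
{
	if (creating_dir) {
		if (contigdirs[cg] < 255)
			contigdirs[cg]++;	/* saturate at 255 */
	} else {
		if (contigdirs[cg] > 0)
			contigdirs[cg]--;	/* decay toward 0 */
	}
}
#endif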
1134 * 1135 * If we allocate a first level directory then force allocation 1136 * in another cylinder group. 1137 */ 1138 static ino_t 1139 ffs_dirpref(pip) 1140 struct inode *pip; 1141 { 1142 struct fs *fs; 1143 int cg, prefcg, dirsize, cgsize; 1144 u_int avgifree, avgbfree, avgndir, curdirsize; 1145 u_int minifree, minbfree, maxndir; 1146 u_int mincg, minndir; 1147 u_int maxcontigdirs; 1148 1149 mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED); 1150 fs = pip->i_fs; 1151 1152 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; 1153 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 1154 avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg; 1155 1156 /* 1157 * Force allocation in another cg if creating a first level dir. 1158 */ 1159 ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref"); 1160 if (ITOV(pip)->v_vflag & VV_ROOT) { 1161 prefcg = arc4random() % fs->fs_ncg; 1162 mincg = prefcg; 1163 minndir = fs->fs_ipg; 1164 for (cg = prefcg; cg < fs->fs_ncg; cg++) 1165 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 1166 fs->fs_cs(fs, cg).cs_nifree >= avgifree && 1167 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 1168 mincg = cg; 1169 minndir = fs->fs_cs(fs, cg).cs_ndir; 1170 } 1171 for (cg = 0; cg < prefcg; cg++) 1172 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 1173 fs->fs_cs(fs, cg).cs_nifree >= avgifree && 1174 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 1175 mincg = cg; 1176 minndir = fs->fs_cs(fs, cg).cs_ndir; 1177 } 1178 return ((ino_t)(fs->fs_ipg * mincg)); 1179 } 1180 1181 /* 1182 * Count various limits which used for 1183 * optimal allocation of a directory inode. 1184 */ 1185 maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg); 1186 minifree = avgifree - avgifree / 4; 1187 if (minifree < 1) 1188 minifree = 1; 1189 minbfree = avgbfree - avgbfree / 4; 1190 if (minbfree < 1) 1191 minbfree = 1; 1192 cgsize = fs->fs_fsize * fs->fs_fpg; 1193 dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir; 1194 curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0; 1195 if (dirsize < curdirsize) 1196 dirsize = curdirsize; 1197 if (dirsize <= 0) 1198 maxcontigdirs = 0; /* dirsize overflowed */ 1199 else 1200 maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255); 1201 if (fs->fs_avgfpdir > 0) 1202 maxcontigdirs = min(maxcontigdirs, 1203 fs->fs_ipg / fs->fs_avgfpdir); 1204 if (maxcontigdirs == 0) 1205 maxcontigdirs = 1; 1206 1207 /* 1208 * Limit number of dirs in one cg and reserve space for 1209 * regular files, but only if we have no deficit in 1210 * inodes or space. 1211 * 1212 * We are trying to find a suitable cylinder group nearby 1213 * our preferred cylinder group to place a new directory. 1214 * We scan from our preferred cylinder group forward looking 1215 * for a cylinder group that meets our criterion. If we get 1216 * to the final cylinder group and do not find anything, 1217 * we start scanning forwards from the beginning of the 1218 * filesystem. While it might seem sensible to start scanning 1219 * backwards or even to alternate looking forward and backward, 1220 * this approach fails badly when the filesystem is nearly full. 1221 * Specifically, we first search all the areas that have no space 1222 * and finally try the one preceeding that. We repeat this on 1223 * every request and in the case of the final block end up 1224 * searching the entire filesystem. By jumping to the front 1225 * of the filesystem, our future forward searches always look 1226 * in new cylinder groups so finds every possible block after 1227 * one pass over the filesystem. 
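/*
 * Illustrative user-space sketch (not part of the FFS code): the limits
 * computed in ffs_dirpref() above, evaluated for one example filesystem.
 * Variable names follow the code; the numbers are invented, and the
 * 1-floor clamps and overflow guard from the real code are omitted for
 * brevity.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned ipg = 32768, fpg = 65536;	/* inodes/frags per cg */
	unsigned fsize = 4096, bsize = 32768;	/* frag/block size */
	unsigned avgifree = 20000, avgbfree = 3000, avgndir = 150;
	unsigned avgfilesize = 16384, avgfpdir = 64;
	unsigned maxndir, minifree, minbfree, cgsize;
	unsigned dirsize, curdirsize, maxcontigdirs;

	maxndir = avgndir + ipg / 16;
	if (maxndir > ipg)
		maxndir = ipg;
	minifree = avgifree - avgifree / 4;
	minbfree = avgbfree - avgbfree / 4;
	cgsize = fsize * fpg;
	dirsize = avgfilesize * avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	maxcontigdirs = (avgbfree * bsize) / dirsize;
	if (maxcontigdirs > 255)
		maxcontigdirs = 255;
	if (avgfpdir > 0 && maxcontigdirs > ipg / avgfpdir)
		maxcontigdirs = ipg / avgfpdir;
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/* Prints: maxndir 2198 minifree 15000 minbfree 2250 maxcontigdirs 86 */
	printf("maxndir %u minifree %u minbfree %u maxcontigdirs %u\n",
	    maxndir, minifree, minbfree, maxcontigdirs);
	return (0);
}
#endif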
1228 */ 1229 prefcg = ino_to_cg(fs, pip->i_number); 1230 for (cg = prefcg; cg < fs->fs_ncg; cg++) 1231 if (fs->fs_cs(fs, cg).cs_ndir < maxndir && 1232 fs->fs_cs(fs, cg).cs_nifree >= minifree && 1233 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) { 1234 if (fs->fs_contigdirs[cg] < maxcontigdirs) 1235 return ((ino_t)(fs->fs_ipg * cg)); 1236 } 1237 for (cg = 0; cg < prefcg; cg++) 1238 if (fs->fs_cs(fs, cg).cs_ndir < maxndir && 1239 fs->fs_cs(fs, cg).cs_nifree >= minifree && 1240 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) { 1241 if (fs->fs_contigdirs[cg] < maxcontigdirs) 1242 return ((ino_t)(fs->fs_ipg * cg)); 1243 } 1244 /* 1245 * This is a backstop when we have deficit in space. 1246 */ 1247 for (cg = prefcg; cg < fs->fs_ncg; cg++) 1248 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree) 1249 return ((ino_t)(fs->fs_ipg * cg)); 1250 for (cg = 0; cg < prefcg; cg++) 1251 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree) 1252 break; 1253 return ((ino_t)(fs->fs_ipg * cg)); 1254 } 1255 1256 /* 1257 * Select the desired position for the next block in a file. The file is 1258 * logically divided into sections. The first section is composed of the 1259 * direct blocks and the next fs_maxbpg blocks. Each additional section 1260 * contains fs_maxbpg blocks. 1261 * 1262 * If no blocks have been allocated in the first section, the policy is to 1263 * request a block in the same cylinder group as the inode that describes 1264 * the file. The first indirect is allocated immediately following the last 1265 * direct block and the data blocks for the first indirect immediately 1266 * follow it. 1267 * 1268 * If no blocks have been allocated in any other section, the indirect 1269 * block(s) are allocated in the same cylinder group as its inode in an 1270 * area reserved immediately following the inode blocks. The policy for 1271 * the data blocks is to place them in a cylinder group with a greater than 1272 * average number of free blocks. An appropriate cylinder group is found 1273 * by using a rotor that sweeps the cylinder groups. When a new group of 1274 * blocks is needed, the sweep begins in the cylinder group following the 1275 * cylinder group from which the previous allocation was made. The sweep 1276 * continues until a cylinder group with greater than the average number 1277 * of free blocks is found. If the allocation is for the first block in an 1278 * indirect block or the previous block is a hole, then the information on 1279 * the previous allocation is unavailable; here a best guess is made based 1280 * on the logical block number being allocated. 1281 * 1282 * If a section is already partially allocated, the policy is to 1283 * allocate blocks contiguously within the section if possible. 1284 */ 1285 ufs2_daddr_t 1286 ffs_blkpref_ufs1(ip, lbn, indx, bap) 1287 struct inode *ip; 1288 ufs_lbn_t lbn; 1289 int indx; 1290 ufs1_daddr_t *bap; 1291 { 1292 struct fs *fs; 1293 u_int cg, inocg; 1294 u_int avgbfree, startcg; 1295 ufs2_daddr_t pref; 1296 1297 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap")); 1298 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED); 1299 fs = ip->i_fs; 1300 /* 1301 * Allocation of indirect blocks is indicated by passing negative 1302 * values in indx: -1 for single indirect, -2 for double indirect, 1303 * -3 for triple indirect. As noted below, we attempt to allocate 1304 * the first indirect inline with the file data. For all later 1305 * indirect blocks, the data is often allocated in other cylinder 1306 * groups. 
However to speed random file access and to speed up 1307 * fsck, the filesystem reserves the first fs_metaspace blocks 1308 * (typically half of fs_minfree) of the data area of each cylinder 1309 * group to hold these later indirect blocks. 1310 */ 1311 inocg = ino_to_cg(fs, ip->i_number); 1312 if (indx < 0) { 1313 /* 1314 * Our preference for indirect blocks is the zone at the 1315 * beginning of the inode's cylinder group data area that 1316 * we try to reserve for indirect blocks. 1317 */ 1318 pref = cgmeta(fs, inocg); 1319 /* 1320 * If we are allocating the first indirect block, try to 1321 * place it immediately following the last direct block. 1322 */ 1323 if (indx == -1 && lbn < NDADDR + NINDIR(fs) && 1324 ip->i_din1->di_db[NDADDR - 1] != 0) 1325 pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag; 1326 return (pref); 1327 } 1328 /* 1329 * If we are allocating the first data block in the first indirect 1330 * block and the indirect has been allocated in the data block area, 1331 * try to place it immediately following the indirect block. 1332 */ 1333 if (lbn == NDADDR) { 1334 pref = ip->i_din1->di_ib[0]; 1335 if (pref != 0 && pref >= cgdata(fs, inocg) && 1336 pref < cgbase(fs, inocg + 1)) 1337 return (pref + fs->fs_frag); 1338 } 1339 /* 1340 * If we are at the beginning of a file, or we have already allocated 1341 * the maximum number of blocks per cylinder group, or we do not 1342 * have a block allocated immediately preceeding us, then we need 1343 * to decide where to start allocating new blocks. 1344 */ 1345 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) { 1346 /* 1347 * If we are allocating a directory data block, we want 1348 * to place it in the metadata area. 1349 */ 1350 if ((ip->i_mode & IFMT) == IFDIR) 1351 return (cgmeta(fs, inocg)); 1352 /* 1353 * Until we fill all the direct and all the first indirect's 1354 * blocks, we try to allocate in the data area of the inode's 1355 * cylinder group. 1356 */ 1357 if (lbn < NDADDR + NINDIR(fs)) 1358 return (cgdata(fs, inocg)); 1359 /* 1360 * Find a cylinder with greater than average number of 1361 * unused data blocks. 1362 */ 1363 if (indx == 0 || bap[indx - 1] == 0) 1364 startcg = inocg + lbn / fs->fs_maxbpg; 1365 else 1366 startcg = dtog(fs, bap[indx - 1]) + 1; 1367 startcg %= fs->fs_ncg; 1368 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 1369 for (cg = startcg; cg < fs->fs_ncg; cg++) 1370 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 1371 fs->fs_cgrotor = cg; 1372 return (cgdata(fs, cg)); 1373 } 1374 for (cg = 0; cg <= startcg; cg++) 1375 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 1376 fs->fs_cgrotor = cg; 1377 return (cgdata(fs, cg)); 1378 } 1379 return (0); 1380 } 1381 /* 1382 * Otherwise, we just always try to lay things out contiguously. 1383 */ 1384 return (bap[indx - 1] + fs->fs_frag); 1385 } 1386 1387 /* 1388 * Same as above, but for UFS2 1389 */ 1390 ufs2_daddr_t 1391 ffs_blkpref_ufs2(ip, lbn, indx, bap) 1392 struct inode *ip; 1393 ufs_lbn_t lbn; 1394 int indx; 1395 ufs2_daddr_t *bap; 1396 { 1397 struct fs *fs; 1398 u_int cg, inocg; 1399 u_int avgbfree, startcg; 1400 ufs2_daddr_t pref; 1401 1402 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap")); 1403 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED); 1404 fs = ip->i_fs; 1405 /* 1406 * Allocation of indirect blocks is indicated by passing negative 1407 * values in indx: -1 for single indirect, -2 for double indirect, 1408 * -3 for triple indirect. As noted below, we attempt to allocate 1409 * the first indirect inline with the file data. 
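/*
 * Illustrative sketch (hypothetical): the forward rotor sweep used by
 * ffs_blkpref_ufs1() above (and repeated in the UFS2 variant below) to find
 * a cylinder group with at least the average number of free blocks, wrapping
 * from the last group back to 0.  nbfree[] stands in for the per-group
 * fs_cs(fs, cg).cs_nbfree summary values.
 */
#if 0
static int
sweep_for_cg(const unsigned *nbfree, unsigned ncg, unsigned startcg,
    unsigned avgbfree)
{
	unsigned cg;

	for (cg = startcg; cg < ncg; cg++)
		if (nbfree[cg] >= avgbfree)
			return ((int)cg);
	for (cg = 0; cg <= startcg; cg++)
		if (nbfree[cg] >= avgbfree)
			return ((int)cg);
	return (-1);	/* caller falls back to ffs_hashalloc()'s search */
}
#endif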
For all later 1410 * indirect blocks, the data is often allocated in other cylinder 1411 * groups. However to speed random file access and to speed up 1412 * fsck, the filesystem reserves the first fs_metaspace blocks 1413 * (typically half of fs_minfree) of the data area of each cylinder 1414 * group to hold these later indirect blocks. 1415 */ 1416 inocg = ino_to_cg(fs, ip->i_number); 1417 if (indx < 0) { 1418 /* 1419 * Our preference for indirect blocks is the zone at the 1420 * beginning of the inode's cylinder group data area that 1421 * we try to reserve for indirect blocks. 1422 */ 1423 pref = cgmeta(fs, inocg); 1424 /* 1425 * If we are allocating the first indirect block, try to 1426 * place it immediately following the last direct block. 1427 */ 1428 if (indx == -1 && lbn < NDADDR + NINDIR(fs) && 1429 ip->i_din2->di_db[NDADDR - 1] != 0) 1430 pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag; 1431 return (pref); 1432 } 1433 /* 1434 * If we are allocating the first data block in the first indirect 1435 * block and the indirect has been allocated in the data block area, 1436 * try to place it immediately following the indirect block. 1437 */ 1438 if (lbn == NDADDR) { 1439 pref = ip->i_din2->di_ib[0]; 1440 if (pref != 0 && pref >= cgdata(fs, inocg) && 1441 pref < cgbase(fs, inocg + 1)) 1442 return (pref + fs->fs_frag); 1443 } 1444 /* 1445 * If we are at the beginning of a file, or we have already allocated 1446 * the maximum number of blocks per cylinder group, or we do not 1447 * have a block allocated immediately preceeding us, then we need 1448 * to decide where to start allocating new blocks. 1449 */ 1450 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) { 1451 /* 1452 * If we are allocating a directory data block, we want 1453 * to place it in the metadata area. 1454 */ 1455 if ((ip->i_mode & IFMT) == IFDIR) 1456 return (cgmeta(fs, inocg)); 1457 /* 1458 * Until we fill all the direct and all the first indirect's 1459 * blocks, we try to allocate in the data area of the inode's 1460 * cylinder group. 1461 */ 1462 if (lbn < NDADDR + NINDIR(fs)) 1463 return (cgdata(fs, inocg)); 1464 /* 1465 * Find a cylinder with greater than average number of 1466 * unused data blocks. 1467 */ 1468 if (indx == 0 || bap[indx - 1] == 0) 1469 startcg = inocg + lbn / fs->fs_maxbpg; 1470 else 1471 startcg = dtog(fs, bap[indx - 1]) + 1; 1472 startcg %= fs->fs_ncg; 1473 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 1474 for (cg = startcg; cg < fs->fs_ncg; cg++) 1475 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 1476 fs->fs_cgrotor = cg; 1477 return (cgdata(fs, cg)); 1478 } 1479 for (cg = 0; cg <= startcg; cg++) 1480 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 1481 fs->fs_cgrotor = cg; 1482 return (cgdata(fs, cg)); 1483 } 1484 return (0); 1485 } 1486 /* 1487 * Otherwise, we just always try to lay things out contiguously. 1488 */ 1489 return (bap[indx - 1] + fs->fs_frag); 1490 } 1491 1492 /* 1493 * Implement the cylinder overflow algorithm. 1494 * 1495 * The policy implemented by this algorithm is: 1496 * 1) allocate the block in its requested cylinder group. 1497 * 2) quadradically rehash on the cylinder group number. 1498 * 3) brute force search for a free block. 1499 * 1500 * Must be called with the UFS lock held. Will release the lock on success 1501 * and return with it held on failure. 
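/*
 * Illustrative user-space sketch (not part of the FFS code): the
 * cylinder-group visit order produced by ffs_hashalloc() below for a
 * filesystem with ncg groups and preferred group icg.  Phase 1 tries icg,
 * phase 2 quadratically rehashes with steps of 1, 2, 4, ..., and phase 3
 * walks every remaining group starting at icg + 2.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned ncg = 16, icg = 5, cg, i;

	printf("%u", icg);			/* 1: preferred group */
	cg = icg;
	for (i = 1; i < ncg; i *= 2) {		/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		printf(" %u", cg);
	}
	cg = (icg + 2) % ncg;			/* 3: brute force */
	for (i = 2; i < ncg; i++) {
		printf(" %u", cg);
		if (++cg == ncg)
			cg = 0;
	}
	printf("\n");
	return (0);
}
#endif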
1502 */ 1503 /*VARARGS5*/ 1504 static ufs2_daddr_t 1505 ffs_hashalloc(ip, cg, pref, size, rsize, allocator) 1506 struct inode *ip; 1507 u_int cg; 1508 ufs2_daddr_t pref; 1509 int size; /* Search size for data blocks, mode for inodes */ 1510 int rsize; /* Real allocated size. */ 1511 allocfcn_t *allocator; 1512 { 1513 struct fs *fs; 1514 ufs2_daddr_t result; 1515 u_int i, icg = cg; 1516 1517 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED); 1518 #ifdef INVARIANTS 1519 if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED) 1520 panic("ffs_hashalloc: allocation on suspended filesystem"); 1521 #endif 1522 fs = ip->i_fs; 1523 /* 1524 * 1: preferred cylinder group 1525 */ 1526 result = (*allocator)(ip, cg, pref, size, rsize); 1527 if (result) 1528 return (result); 1529 /* 1530 * 2: quadratic rehash 1531 */ 1532 for (i = 1; i < fs->fs_ncg; i *= 2) { 1533 cg += i; 1534 if (cg >= fs->fs_ncg) 1535 cg -= fs->fs_ncg; 1536 result = (*allocator)(ip, cg, 0, size, rsize); 1537 if (result) 1538 return (result); 1539 } 1540 /* 1541 * 3: brute force search 1542 * Note that we start at i == 2, since 0 was checked initially, 1543 * and 1 is always checked in the quadratic rehash. 1544 */ 1545 cg = (icg + 2) % fs->fs_ncg; 1546 for (i = 2; i < fs->fs_ncg; i++) { 1547 result = (*allocator)(ip, cg, 0, size, rsize); 1548 if (result) 1549 return (result); 1550 cg++; 1551 if (cg == fs->fs_ncg) 1552 cg = 0; 1553 } 1554 return (0); 1555 } 1556 1557 /* 1558 * Determine whether a fragment can be extended. 1559 * 1560 * Check to see if the necessary fragments are available, and 1561 * if they are, allocate them. 1562 */ 1563 static ufs2_daddr_t 1564 ffs_fragextend(ip, cg, bprev, osize, nsize) 1565 struct inode *ip; 1566 u_int cg; 1567 ufs2_daddr_t bprev; 1568 int osize, nsize; 1569 { 1570 struct fs *fs; 1571 struct cg *cgp; 1572 struct buf *bp; 1573 struct ufsmount *ump; 1574 int nffree; 1575 long bno; 1576 int frags, bbase; 1577 int i, error; 1578 u_int8_t *blksfree; 1579 1580 ump = ip->i_ump; 1581 fs = ip->i_fs; 1582 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize)) 1583 return (0); 1584 frags = numfrags(fs, nsize); 1585 bbase = fragnum(fs, bprev); 1586 if (bbase > fragnum(fs, (bprev + frags - 1))) { 1587 /* cannot extend across a block boundary */ 1588 return (0); 1589 } 1590 UFS_UNLOCK(ump); 1591 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 1592 (int)fs->fs_cgsize, NOCRED, &bp); 1593 if (error) 1594 goto fail; 1595 cgp = (struct cg *)bp->b_data; 1596 if (!cg_chkmagic(cgp)) 1597 goto fail; 1598 bp->b_xflags |= BX_BKGRDWRITE; 1599 cgp->cg_old_time = cgp->cg_time = time_second; 1600 bno = dtogd(fs, bprev); 1601 blksfree = cg_blksfree(cgp); 1602 for (i = numfrags(fs, osize); i < frags; i++) 1603 if (isclr(blksfree, bno + i)) 1604 goto fail; 1605 /* 1606 * the current fragment can be extended 1607 * deduct the count on fragment being extended into 1608 * increase the count on the remaining fragment (if any) 1609 * allocate the extended piece 1610 */ 1611 for (i = frags; i < fs->fs_frag - bbase; i++) 1612 if (isclr(blksfree, bno + i)) 1613 break; 1614 cgp->cg_frsum[i - numfrags(fs, osize)]--; 1615 if (i != frags) 1616 cgp->cg_frsum[i - frags]++; 1617 for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) { 1618 clrbit(blksfree, bno + i); 1619 cgp->cg_cs.cs_nffree--; 1620 nffree++; 1621 } 1622 UFS_LOCK(ump); 1623 fs->fs_cstotal.cs_nffree -= nffree; 1624 fs->fs_cs(fs, cg).cs_nffree -= nffree; 1625 fs->fs_fmod = 1; 1626 ACTIVECLEAR(fs, cg); 1627 UFS_UNLOCK(ump); 1628 if (DOINGSOFTDEP(ITOV(ip))) 1629 
softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev, 1630 frags, numfrags(fs, osize)); 1631 bdwrite(bp); 1632 return (bprev); 1633 1634 fail: 1635 brelse(bp); 1636 UFS_LOCK(ump); 1637 return (0); 1638 1639 } 1640 1641 /* 1642 * Determine whether a block can be allocated. 1643 * 1644 * Check to see if a block of the appropriate size is available, 1645 * and if it is, allocate it. 1646 */ 1647 static ufs2_daddr_t 1648 ffs_alloccg(ip, cg, bpref, size, rsize) 1649 struct inode *ip; 1650 u_int cg; 1651 ufs2_daddr_t bpref; 1652 int size; 1653 int rsize; 1654 { 1655 struct fs *fs; 1656 struct cg *cgp; 1657 struct buf *bp; 1658 struct ufsmount *ump; 1659 ufs1_daddr_t bno; 1660 ufs2_daddr_t blkno; 1661 int i, allocsiz, error, frags; 1662 u_int8_t *blksfree; 1663 1664 ump = ip->i_ump; 1665 fs = ip->i_fs; 1666 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) 1667 return (0); 1668 UFS_UNLOCK(ump); 1669 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 1670 (int)fs->fs_cgsize, NOCRED, &bp); 1671 if (error) 1672 goto fail; 1673 cgp = (struct cg *)bp->b_data; 1674 if (!cg_chkmagic(cgp) || 1675 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) 1676 goto fail; 1677 bp->b_xflags |= BX_BKGRDWRITE; 1678 cgp->cg_old_time = cgp->cg_time = time_second; 1679 if (size == fs->fs_bsize) { 1680 UFS_LOCK(ump); 1681 blkno = ffs_alloccgblk(ip, bp, bpref, rsize); 1682 ACTIVECLEAR(fs, cg); 1683 UFS_UNLOCK(ump); 1684 bdwrite(bp); 1685 return (blkno); 1686 } 1687 /* 1688 * check to see if any fragments are already available 1689 * allocsiz is the size which will be allocated, hacking 1690 * it down to a smaller size if necessary 1691 */ 1692 blksfree = cg_blksfree(cgp); 1693 frags = numfrags(fs, size); 1694 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) 1695 if (cgp->cg_frsum[allocsiz] != 0) 1696 break; 1697 if (allocsiz == fs->fs_frag) { 1698 /* 1699 * no fragments were available, so a block will be 1700 * allocated, and hacked up 1701 */ 1702 if (cgp->cg_cs.cs_nbfree == 0) 1703 goto fail; 1704 UFS_LOCK(ump); 1705 blkno = ffs_alloccgblk(ip, bp, bpref, rsize); 1706 ACTIVECLEAR(fs, cg); 1707 UFS_UNLOCK(ump); 1708 bdwrite(bp); 1709 return (blkno); 1710 } 1711 KASSERT(size == rsize, 1712 ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize)); 1713 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz); 1714 if (bno < 0) 1715 goto fail; 1716 for (i = 0; i < frags; i++) 1717 clrbit(blksfree, bno + i); 1718 cgp->cg_cs.cs_nffree -= frags; 1719 cgp->cg_frsum[allocsiz]--; 1720 if (frags != allocsiz) 1721 cgp->cg_frsum[allocsiz - frags]++; 1722 UFS_LOCK(ump); 1723 fs->fs_cstotal.cs_nffree -= frags; 1724 fs->fs_cs(fs, cg).cs_nffree -= frags; 1725 fs->fs_fmod = 1; 1726 blkno = cgbase(fs, cg) + bno; 1727 ACTIVECLEAR(fs, cg); 1728 UFS_UNLOCK(ump); 1729 if (DOINGSOFTDEP(ITOV(ip))) 1730 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0); 1731 bdwrite(bp); 1732 return (blkno); 1733 1734 fail: 1735 brelse(bp); 1736 UFS_LOCK(ump); 1737 return (0); 1738 } 1739 1740 /* 1741 * Allocate a block in a cylinder group. 1742 * 1743 * This algorithm implements the following policy: 1744 * 1) allocate the requested block. 1745 * 2) allocate a rotationally optimal block in the same cylinder. 1746 * 3) allocate the next available block on the block rotor for the 1747 * specified cylinder group. 1748 * Note that this routine only allocates fs_bsize blocks; these 1749 * blocks may be fragmented by the routine that allocates them. 
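/*
 * Illustrative sketch (hypothetical): the fragment-fit search performed by
 * ffs_alloccg() above.  frsum[i] counts free runs of exactly i fragments;
 * the allocator takes the smallest run at least as large as the request,
 * and when the run is larger the caller re-accounts the remainder to
 * frsum[run - frags].
 */
#if 0
static int
pick_allocsiz(const int *frsum, int frag_per_blk, int frags)
{
	int allocsiz;

	for (allocsiz = frags; allocsiz < frag_per_blk; allocsiz++)
		if (frsum[allocsiz] != 0)
			return (allocsiz);
	return (-1);	/* no run big enough: split a whole free block */
}
#endif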
1750 */ 1751 static ufs2_daddr_t 1752 ffs_alloccgblk(ip, bp, bpref, size) 1753 struct inode *ip; 1754 struct buf *bp; 1755 ufs2_daddr_t bpref; 1756 int size; 1757 { 1758 struct fs *fs; 1759 struct cg *cgp; 1760 struct ufsmount *ump; 1761 ufs1_daddr_t bno; 1762 ufs2_daddr_t blkno; 1763 u_int8_t *blksfree; 1764 int i, cgbpref; 1765 1766 fs = ip->i_fs; 1767 ump = ip->i_ump; 1768 mtx_assert(UFS_MTX(ump), MA_OWNED); 1769 cgp = (struct cg *)bp->b_data; 1770 blksfree = cg_blksfree(cgp); 1771 if (bpref == 0) { 1772 bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag; 1773 } else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) { 1774 /* map bpref to correct zone in this cg */ 1775 if (bpref < cgdata(fs, cgbpref)) 1776 bpref = cgmeta(fs, cgp->cg_cgx); 1777 else 1778 bpref = cgdata(fs, cgp->cg_cgx); 1779 } 1780 /* 1781 * if the requested block is available, use it 1782 */ 1783 bno = dtogd(fs, blknum(fs, bpref)); 1784 if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno))) 1785 goto gotit; 1786 /* 1787 * Take the next available block in this cylinder group. 1788 */ 1789 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag); 1790 if (bno < 0) 1791 return (0); 1792 /* Update cg_rotor only if allocated from the data zone */ 1793 if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx))) 1794 cgp->cg_rotor = bno; 1795 gotit: 1796 blkno = fragstoblks(fs, bno); 1797 ffs_clrblock(fs, blksfree, (long)blkno); 1798 ffs_clusteracct(fs, cgp, blkno, -1); 1799 cgp->cg_cs.cs_nbfree--; 1800 fs->fs_cstotal.cs_nbfree--; 1801 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--; 1802 fs->fs_fmod = 1; 1803 blkno = cgbase(fs, cgp->cg_cgx) + bno; 1804 /* 1805 * If the caller didn't want the whole block free the frags here. 1806 */ 1807 size = numfrags(fs, size); 1808 if (size != fs->fs_frag) { 1809 bno = dtogd(fs, blkno); 1810 for (i = size; i < fs->fs_frag; i++) 1811 setbit(blksfree, bno + i); 1812 i = fs->fs_frag - size; 1813 cgp->cg_cs.cs_nffree += i; 1814 fs->fs_cstotal.cs_nffree += i; 1815 fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i; 1816 fs->fs_fmod = 1; 1817 cgp->cg_frsum[i]++; 1818 } 1819 /* XXX Fixme. */ 1820 UFS_UNLOCK(ump); 1821 if (DOINGSOFTDEP(ITOV(ip))) 1822 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, 1823 size, 0); 1824 UFS_LOCK(ump); 1825 return (blkno); 1826 } 1827 1828 /* 1829 * Determine whether a cluster can be allocated. 1830 * 1831 * We do not currently check for optimal rotational layout if there 1832 * are multiple choices in the same cylinder group. Instead we just 1833 * take the first one that we find following bpref. 1834 */ 1835 static ufs2_daddr_t 1836 ffs_clusteralloc(ip, cg, bpref, len) 1837 struct inode *ip; 1838 u_int cg; 1839 ufs2_daddr_t bpref; 1840 int len; 1841 { 1842 struct fs *fs; 1843 struct cg *cgp; 1844 struct buf *bp; 1845 struct ufsmount *ump; 1846 int i, run, bit, map, got; 1847 ufs2_daddr_t bno; 1848 u_char *mapp; 1849 int32_t *lp; 1850 u_int8_t *blksfree; 1851 1852 fs = ip->i_fs; 1853 ump = ip->i_ump; 1854 if (fs->fs_maxcluster[cg] < len) 1855 return (0); 1856 UFS_UNLOCK(ump); 1857 if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, 1858 NOCRED, &bp)) 1859 goto fail_lock; 1860 cgp = (struct cg *)bp->b_data; 1861 if (!cg_chkmagic(cgp)) 1862 goto fail_lock; 1863 bp->b_xflags |= BX_BKGRDWRITE; 1864 /* 1865 * Check to see if a cluster of the needed size (or bigger) is 1866 * available in this cylinder group. 
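 * cg_clustersum[i] counts the free clusters of i contiguous blocks in
 * this cylinder group, so scanning the entries from len through
 * fs_contigsumsize tells us whether a sufficiently long run exists
 * before we go to the trouble of walking the cluster map itself.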
1867 */ 1868 lp = &cg_clustersum(cgp)[len]; 1869 for (i = len; i <= fs->fs_contigsumsize; i++) 1870 if (*lp++ > 0) 1871 break; 1872 if (i > fs->fs_contigsumsize) { 1873 /* 1874 * This is the first time looking for a cluster in this 1875 * cylinder group. Update the cluster summary information 1876 * to reflect the true maximum sized cluster so that 1877 * future cluster allocation requests can avoid reading 1878 * the cylinder group map only to find no clusters. 1879 */ 1880 lp = &cg_clustersum(cgp)[len - 1]; 1881 for (i = len - 1; i > 0; i--) 1882 if (*lp-- > 0) 1883 break; 1884 UFS_LOCK(ump); 1885 fs->fs_maxcluster[cg] = i; 1886 goto fail; 1887 } 1888 /* 1889 * Search the cluster map to find a big enough cluster. 1890 * We take the first one that we find, even if it is larger 1891 * than we need as we prefer to get one close to the previous 1892 * block allocation. We do not search before the current 1893 * preference point as we do not want to allocate a block 1894 * that is allocated before the previous one (as we will 1895 * then have to wait for another pass of the elevator 1896 * algorithm before it will be read). We prefer to fail and 1897 * be recalled to try an allocation in the next cylinder group. 1898 */ 1899 if (dtog(fs, bpref) != cg) 1900 bpref = cgdata(fs, cg); 1901 else 1902 bpref = blknum(fs, bpref); 1903 bpref = fragstoblks(fs, dtogd(fs, bpref)); 1904 mapp = &cg_clustersfree(cgp)[bpref / NBBY]; 1905 map = *mapp++; 1906 bit = 1 << (bpref % NBBY); 1907 for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) { 1908 if ((map & bit) == 0) { 1909 run = 0; 1910 } else { 1911 run++; 1912 if (run == len) 1913 break; 1914 } 1915 if ((got & (NBBY - 1)) != (NBBY - 1)) { 1916 bit <<= 1; 1917 } else { 1918 map = *mapp++; 1919 bit = 1; 1920 } 1921 } 1922 if (got >= cgp->cg_nclusterblks) 1923 goto fail_lock; 1924 /* 1925 * Allocate the cluster that we have found. 1926 */ 1927 blksfree = cg_blksfree(cgp); 1928 for (i = 1; i <= len; i++) 1929 if (!ffs_isblock(fs, blksfree, got - run + i)) 1930 panic("ffs_clusteralloc: map mismatch"); 1931 bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1); 1932 if (dtog(fs, bno) != cg) 1933 panic("ffs_clusteralloc: allocated out of group"); 1934 len = blkstofrags(fs, len); 1935 UFS_LOCK(ump); 1936 for (i = 0; i < len; i += fs->fs_frag) 1937 if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i) 1938 panic("ffs_clusteralloc: lost block"); 1939 ACTIVECLEAR(fs, cg); 1940 UFS_UNLOCK(ump); 1941 bdwrite(bp); 1942 return (bno); 1943 1944 fail_lock: 1945 UFS_LOCK(ump); 1946 fail: 1947 brelse(bp); 1948 return (0); 1949 } 1950 1951 static inline struct buf * 1952 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags) 1953 { 1954 struct fs *fs; 1955 1956 fs = ip->i_fs; 1957 return (getblk(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, 1958 cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0, 1959 gbflags)); 1960 } 1961 1962 /* 1963 * Determine whether an inode can be allocated. 1964 * 1965 * Check to see if an inode is available, and if it is, 1966 * allocate it using the following policy: 1967 * 1) allocate the requested inode. 1968 * 2) allocate the next available inode after the requested 1969 * inode in the specified cylinder group. 
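 * The mode argument carries the IFMT type of the new inode so that
 * the directory counts can be maintained; the final argument is
 * ignored and exists only so that this routine matches the
 * allocfcn_t signature expected by ffs_hashalloc().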
1970 */ 1971 static ufs2_daddr_t 1972 ffs_nodealloccg(ip, cg, ipref, mode, unused) 1973 struct inode *ip; 1974 u_int cg; 1975 ufs2_daddr_t ipref; 1976 int mode; 1977 int unused; 1978 { 1979 struct fs *fs; 1980 struct cg *cgp; 1981 struct buf *bp, *ibp; 1982 struct ufsmount *ump; 1983 u_int8_t *inosused, *loc; 1984 struct ufs2_dinode *dp2; 1985 int error, start, len, i; 1986 u_int32_t old_initediblk; 1987 1988 fs = ip->i_fs; 1989 ump = ip->i_ump; 1990 check_nifree: 1991 if (fs->fs_cs(fs, cg).cs_nifree == 0) 1992 return (0); 1993 UFS_UNLOCK(ump); 1994 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 1995 (int)fs->fs_cgsize, NOCRED, &bp); 1996 if (error) { 1997 brelse(bp); 1998 UFS_LOCK(ump); 1999 return (0); 2000 } 2001 cgp = (struct cg *)bp->b_data; 2002 restart: 2003 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) { 2004 brelse(bp); 2005 UFS_LOCK(ump); 2006 return (0); 2007 } 2008 bp->b_xflags |= BX_BKGRDWRITE; 2009 inosused = cg_inosused(cgp); 2010 if (ipref) { 2011 ipref %= fs->fs_ipg; 2012 if (isclr(inosused, ipref)) 2013 goto gotit; 2014 } 2015 start = cgp->cg_irotor / NBBY; 2016 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY); 2017 loc = memcchr(&inosused[start], 0xff, len); 2018 if (loc == NULL) { 2019 len = start + 1; 2020 start = 0; 2021 loc = memcchr(&inosused[start], 0xff, len); 2022 if (loc == NULL) { 2023 printf("cg = %d, irotor = %ld, fs = %s\n", 2024 cg, (long)cgp->cg_irotor, fs->fs_fsmnt); 2025 panic("ffs_nodealloccg: map corrupted"); 2026 /* NOTREACHED */ 2027 } 2028 } 2029 ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1; 2030 gotit: 2031 /* 2032 * Check to see if we need to initialize more inodes. 2033 */ 2034 if (fs->fs_magic == FS_UFS2_MAGIC && 2035 ipref + INOPB(fs) > cgp->cg_initediblk && 2036 cgp->cg_initediblk < cgp->cg_niblk) { 2037 old_initediblk = cgp->cg_initediblk; 2038 2039 /* 2040 * Free the cylinder group lock before writing the 2041 * initialized inode block. Entering the 2042 * babarrierwrite() with the cylinder group lock 2043 * causes lock order violation between the lock and 2044 * snaplk. 2045 * 2046 * Another thread can decide to initialize the same 2047 * inode block, but whichever thread first gets the 2048 * cylinder group lock after writing the newly 2049 * allocated inode block will update it and the other 2050 * will realize that it has lost and leave the 2051 * cylinder group unchanged. 2052 */ 2053 ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT); 2054 brelse(bp); 2055 if (ibp == NULL) { 2056 /* 2057 * The inode block buffer is already owned by 2058 * another thread, which must initialize it. 2059 * Wait on the buffer to allow another thread 2060 * to finish the updates, with dropped cg 2061 * buffer lock, then retry. 2062 */ 2063 ibp = getinobuf(ip, cg, old_initediblk, 0); 2064 brelse(ibp); 2065 UFS_LOCK(ump); 2066 goto check_nifree; 2067 } 2068 bzero(ibp->b_data, (int)fs->fs_bsize); 2069 dp2 = (struct ufs2_dinode *)(ibp->b_data); 2070 for (i = 0; i < INOPB(fs); i++) { 2071 dp2->di_gen = arc4random() / 2 + 1; 2072 dp2++; 2073 } 2074 /* 2075 * Rather than adding a soft updates dependency to ensure 2076 * that the new inode block is written before it is claimed 2077 * by the cylinder group map, we just do a barrier write 2078 * here. The barrier write will ensure that the inode block 2079 * gets written before the updated cylinder group map can be 2080 * written. The barrier write should only slow down bulk 2081 * loading of newly created filesystems. 
2082 */ 2083 babarrierwrite(ibp); 2084 2085 /* 2086 * After the inode block is written, try to update the 2087 * cg initediblk pointer. If another thread beat us 2088 * to it, then leave it unchanged as the other thread 2089 * has already set it correctly. 2090 */ 2091 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 2092 (int)fs->fs_cgsize, NOCRED, &bp); 2093 UFS_LOCK(ump); 2094 ACTIVECLEAR(fs, cg); 2095 UFS_UNLOCK(ump); 2096 if (error != 0) { 2097 brelse(bp); 2098 return (error); 2099 } 2100 cgp = (struct cg *)bp->b_data; 2101 if (cgp->cg_initediblk == old_initediblk) 2102 cgp->cg_initediblk += INOPB(fs); 2103 goto restart; 2104 } 2105 cgp->cg_old_time = cgp->cg_time = time_second; 2106 cgp->cg_irotor = ipref; 2107 UFS_LOCK(ump); 2108 ACTIVECLEAR(fs, cg); 2109 setbit(inosused, ipref); 2110 cgp->cg_cs.cs_nifree--; 2111 fs->fs_cstotal.cs_nifree--; 2112 fs->fs_cs(fs, cg).cs_nifree--; 2113 fs->fs_fmod = 1; 2114 if ((mode & IFMT) == IFDIR) { 2115 cgp->cg_cs.cs_ndir++; 2116 fs->fs_cstotal.cs_ndir++; 2117 fs->fs_cs(fs, cg).cs_ndir++; 2118 } 2119 UFS_UNLOCK(ump); 2120 if (DOINGSOFTDEP(ITOV(ip))) 2121 softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode); 2122 bdwrite(bp); 2123 return ((ino_t)(cg * fs->fs_ipg + ipref)); 2124 } 2125 2126 /* 2127 * Free a block or fragment. 2128 * 2129 * The specified block or fragment is placed back in the 2130 * free map. If a fragment is deallocated, a possible 2131 * block reassembly is checked. 2132 */ 2133 static void 2134 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd) 2135 struct ufsmount *ump; 2136 struct fs *fs; 2137 struct vnode *devvp; 2138 ufs2_daddr_t bno; 2139 long size; 2140 ino_t inum; 2141 struct workhead *dephd; 2142 { 2143 struct mount *mp; 2144 struct cg *cgp; 2145 struct buf *bp; 2146 ufs1_daddr_t fragno, cgbno; 2147 ufs2_daddr_t cgblkno; 2148 int i, blk, frags, bbase; 2149 u_int cg; 2150 u_int8_t *blksfree; 2151 struct cdev *dev; 2152 2153 cg = dtog(fs, bno); 2154 if (devvp->v_type == VREG) { 2155 /* devvp is a snapshot */ 2156 dev = VTOI(devvp)->i_devvp->v_rdev; 2157 cgblkno = fragstoblks(fs, cgtod(fs, cg)); 2158 } else { 2159 /* devvp is a normal disk device */ 2160 dev = devvp->v_rdev; 2161 cgblkno = fsbtodb(fs, cgtod(fs, cg)); 2162 ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg"); 2163 } 2164 #ifdef INVARIANTS 2165 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 || 2166 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) { 2167 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n", 2168 devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize, 2169 size, fs->fs_fsmnt); 2170 panic("ffs_blkfree_cg: bad size"); 2171 } 2172 #endif 2173 if ((u_int)bno >= fs->fs_size) { 2174 printf("bad block %jd, ino %lu\n", (intmax_t)bno, 2175 (u_long)inum); 2176 ffs_fserr(fs, inum, "bad block"); 2177 return; 2178 } 2179 if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) { 2180 brelse(bp); 2181 return; 2182 } 2183 cgp = (struct cg *)bp->b_data; 2184 if (!cg_chkmagic(cgp)) { 2185 brelse(bp); 2186 return; 2187 } 2188 bp->b_xflags |= BX_BKGRDWRITE; 2189 cgp->cg_old_time = cgp->cg_time = time_second; 2190 cgbno = dtogd(fs, bno); 2191 blksfree = cg_blksfree(cgp); 2192 UFS_LOCK(ump); 2193 if (size == fs->fs_bsize) { 2194 fragno = fragstoblks(fs, cgbno); 2195 if (!ffs_isfreeblock(fs, blksfree, fragno)) { 2196 if (devvp->v_type == VREG) { 2197 UFS_UNLOCK(ump); 2198 /* devvp is a snapshot */ 2199 brelse(bp); 2200 return; 2201 } 2202 printf("dev = %s, block = %jd, fs = %s\n", 2203 devtoname(dev), (intmax_t)bno, fs->fs_fsmnt); 2204 
panic("ffs_blkfree_cg: freeing free block"); 2205 } 2206 ffs_setblock(fs, blksfree, fragno); 2207 ffs_clusteracct(fs, cgp, fragno, 1); 2208 cgp->cg_cs.cs_nbfree++; 2209 fs->fs_cstotal.cs_nbfree++; 2210 fs->fs_cs(fs, cg).cs_nbfree++; 2211 } else { 2212 bbase = cgbno - fragnum(fs, cgbno); 2213 /* 2214 * decrement the counts associated with the old frags 2215 */ 2216 blk = blkmap(fs, blksfree, bbase); 2217 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 2218 /* 2219 * deallocate the fragment 2220 */ 2221 frags = numfrags(fs, size); 2222 for (i = 0; i < frags; i++) { 2223 if (isset(blksfree, cgbno + i)) { 2224 printf("dev = %s, block = %jd, fs = %s\n", 2225 devtoname(dev), (intmax_t)(bno + i), 2226 fs->fs_fsmnt); 2227 panic("ffs_blkfree_cg: freeing free frag"); 2228 } 2229 setbit(blksfree, cgbno + i); 2230 } 2231 cgp->cg_cs.cs_nffree += i; 2232 fs->fs_cstotal.cs_nffree += i; 2233 fs->fs_cs(fs, cg).cs_nffree += i; 2234 /* 2235 * add back in counts associated with the new frags 2236 */ 2237 blk = blkmap(fs, blksfree, bbase); 2238 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 2239 /* 2240 * if a complete block has been reassembled, account for it 2241 */ 2242 fragno = fragstoblks(fs, bbase); 2243 if (ffs_isblock(fs, blksfree, fragno)) { 2244 cgp->cg_cs.cs_nffree -= fs->fs_frag; 2245 fs->fs_cstotal.cs_nffree -= fs->fs_frag; 2246 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; 2247 ffs_clusteracct(fs, cgp, fragno, 1); 2248 cgp->cg_cs.cs_nbfree++; 2249 fs->fs_cstotal.cs_nbfree++; 2250 fs->fs_cs(fs, cg).cs_nbfree++; 2251 } 2252 } 2253 fs->fs_fmod = 1; 2254 ACTIVECLEAR(fs, cg); 2255 UFS_UNLOCK(ump); 2256 mp = UFSTOVFS(ump); 2257 if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG) 2258 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno, 2259 numfrags(fs, size), dephd); 2260 bdwrite(bp); 2261 } 2262 2263 TASKQUEUE_DEFINE_THREAD(ffs_trim); 2264 2265 struct ffs_blkfree_trim_params { 2266 struct task task; 2267 struct ufsmount *ump; 2268 struct vnode *devvp; 2269 ufs2_daddr_t bno; 2270 long size; 2271 ino_t inum; 2272 struct workhead *pdephd; 2273 struct workhead dephd; 2274 }; 2275 2276 static void 2277 ffs_blkfree_trim_task(ctx, pending) 2278 void *ctx; 2279 int pending; 2280 { 2281 struct ffs_blkfree_trim_params *tp; 2282 2283 tp = ctx; 2284 ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size, 2285 tp->inum, tp->pdephd); 2286 vn_finished_secondary_write(UFSTOVFS(tp->ump)); 2287 free(tp, M_TEMP); 2288 } 2289 2290 static void 2291 ffs_blkfree_trim_completed(bip) 2292 struct bio *bip; 2293 { 2294 struct ffs_blkfree_trim_params *tp; 2295 2296 tp = bip->bio_caller2; 2297 g_destroy_bio(bip); 2298 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp); 2299 taskqueue_enqueue(taskqueue_ffs_trim, &tp->task); 2300 } 2301 2302 void 2303 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd) 2304 struct ufsmount *ump; 2305 struct fs *fs; 2306 struct vnode *devvp; 2307 ufs2_daddr_t bno; 2308 long size; 2309 ino_t inum; 2310 enum vtype vtype; 2311 struct workhead *dephd; 2312 { 2313 struct mount *mp; 2314 struct bio *bip; 2315 struct ffs_blkfree_trim_params *tp; 2316 2317 /* 2318 * Check to see if a snapshot wants to claim the block. 2319 * Check that devvp is a normal disk device, not a snapshot, 2320 * it has a snapshot(s) associated with it, and one of the 2321 * snapshots wants to claim the block. 
2322 */ 2323 if (devvp->v_type != VREG && 2324 (devvp->v_vflag & VV_COPYONWRITE) && 2325 ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) { 2326 return; 2327 } 2328 /* 2329 * Nothing to delay if TRIM is disabled, or the operation is 2330 * performed on the snapshot. 2331 */ 2332 if (!ump->um_candelete || devvp->v_type == VREG) { 2333 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd); 2334 return; 2335 } 2336 2337 /* 2338 * Postpone the set of the free bit in the cg bitmap until the 2339 * BIO_DELETE is completed. Otherwise, due to disk queue 2340 * reordering, TRIM might be issued after we reuse the block 2341 * and write some new data into it. 2342 */ 2343 tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK); 2344 tp->ump = ump; 2345 tp->devvp = devvp; 2346 tp->bno = bno; 2347 tp->size = size; 2348 tp->inum = inum; 2349 if (dephd != NULL) { 2350 LIST_INIT(&tp->dephd); 2351 LIST_SWAP(dephd, &tp->dephd, worklist, wk_list); 2352 tp->pdephd = &tp->dephd; 2353 } else 2354 tp->pdephd = NULL; 2355 2356 bip = g_alloc_bio(); 2357 bip->bio_cmd = BIO_DELETE; 2358 bip->bio_offset = dbtob(fsbtodb(fs, bno)); 2359 bip->bio_done = ffs_blkfree_trim_completed; 2360 bip->bio_length = size; 2361 bip->bio_caller2 = tp; 2362 2363 mp = UFSTOVFS(ump); 2364 vn_start_secondary_write(NULL, &mp, 0); 2365 g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private); 2366 } 2367 2368 #ifdef INVARIANTS 2369 /* 2370 * Verify allocation of a block or fragment. Returns true if block or 2371 * fragment is allocated, false if it is free. 2372 */ 2373 static int 2374 ffs_checkblk(ip, bno, size) 2375 struct inode *ip; 2376 ufs2_daddr_t bno; 2377 long size; 2378 { 2379 struct fs *fs; 2380 struct cg *cgp; 2381 struct buf *bp; 2382 ufs1_daddr_t cgbno; 2383 int i, error, frags, free; 2384 u_int8_t *blksfree; 2385 2386 fs = ip->i_fs; 2387 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) { 2388 printf("bsize = %ld, size = %ld, fs = %s\n", 2389 (long)fs->fs_bsize, size, fs->fs_fsmnt); 2390 panic("ffs_checkblk: bad size"); 2391 } 2392 if ((u_int)bno >= fs->fs_size) 2393 panic("ffs_checkblk: bad block %jd", (intmax_t)bno); 2394 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))), 2395 (int)fs->fs_cgsize, NOCRED, &bp); 2396 if (error) 2397 panic("ffs_checkblk: cg bread failed"); 2398 cgp = (struct cg *)bp->b_data; 2399 if (!cg_chkmagic(cgp)) 2400 panic("ffs_checkblk: cg magic mismatch"); 2401 bp->b_xflags |= BX_BKGRDWRITE; 2402 blksfree = cg_blksfree(cgp); 2403 cgbno = dtogd(fs, bno); 2404 if (size == fs->fs_bsize) { 2405 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno)); 2406 } else { 2407 frags = numfrags(fs, size); 2408 for (free = 0, i = 0; i < frags; i++) 2409 if (isset(blksfree, cgbno + i)) 2410 free++; 2411 if (free != 0 && free != frags) 2412 panic("ffs_checkblk: partially free fragment"); 2413 } 2414 brelse(bp); 2415 return (!free); 2416 } 2417 #endif /* INVARIANTS */ 2418 2419 /* 2420 * Free an inode. 2421 */ 2422 int 2423 ffs_vfree(pvp, ino, mode) 2424 struct vnode *pvp; 2425 ino_t ino; 2426 int mode; 2427 { 2428 struct inode *ip; 2429 2430 if (DOINGSOFTDEP(pvp)) { 2431 softdep_freefile(pvp, ino, mode); 2432 return (0); 2433 } 2434 ip = VTOI(pvp); 2435 return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode, 2436 NULL)); 2437 } 2438 2439 /* 2440 * Do the actual free operation. 2441 * The specified inode is placed back in the free map. 
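 * As in ffs_blkfree_cg(), devvp may be either the filesystem device
 * or a snapshot vnode; the cylinder group is read using a logical
 * block number in the snapshot case and a physical disk address
 * otherwise.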
2442 */ 2443 int 2444 ffs_freefile(ump, fs, devvp, ino, mode, wkhd) 2445 struct ufsmount *ump; 2446 struct fs *fs; 2447 struct vnode *devvp; 2448 ino_t ino; 2449 int mode; 2450 struct workhead *wkhd; 2451 { 2452 struct cg *cgp; 2453 struct buf *bp; 2454 ufs2_daddr_t cgbno; 2455 int error; 2456 u_int cg; 2457 u_int8_t *inosused; 2458 struct cdev *dev; 2459 2460 cg = ino_to_cg(fs, ino); 2461 if (devvp->v_type == VREG) { 2462 /* devvp is a snapshot */ 2463 dev = VTOI(devvp)->i_devvp->v_rdev; 2464 cgbno = fragstoblks(fs, cgtod(fs, cg)); 2465 } else { 2466 /* devvp is a normal disk device */ 2467 dev = devvp->v_rdev; 2468 cgbno = fsbtodb(fs, cgtod(fs, cg)); 2469 } 2470 if (ino >= fs->fs_ipg * fs->fs_ncg) 2471 panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s", 2472 devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt); 2473 if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) { 2474 brelse(bp); 2475 return (error); 2476 } 2477 cgp = (struct cg *)bp->b_data; 2478 if (!cg_chkmagic(cgp)) { 2479 brelse(bp); 2480 return (0); 2481 } 2482 bp->b_xflags |= BX_BKGRDWRITE; 2483 cgp->cg_old_time = cgp->cg_time = time_second; 2484 inosused = cg_inosused(cgp); 2485 ino %= fs->fs_ipg; 2486 if (isclr(inosused, ino)) { 2487 printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev), 2488 (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt); 2489 if (fs->fs_ronly == 0) 2490 panic("ffs_freefile: freeing free inode"); 2491 } 2492 clrbit(inosused, ino); 2493 if (ino < cgp->cg_irotor) 2494 cgp->cg_irotor = ino; 2495 cgp->cg_cs.cs_nifree++; 2496 UFS_LOCK(ump); 2497 fs->fs_cstotal.cs_nifree++; 2498 fs->fs_cs(fs, cg).cs_nifree++; 2499 if ((mode & IFMT) == IFDIR) { 2500 cgp->cg_cs.cs_ndir--; 2501 fs->fs_cstotal.cs_ndir--; 2502 fs->fs_cs(fs, cg).cs_ndir--; 2503 } 2504 fs->fs_fmod = 1; 2505 ACTIVECLEAR(fs, cg); 2506 UFS_UNLOCK(ump); 2507 if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type != VREG) 2508 softdep_setup_inofree(UFSTOVFS(ump), bp, 2509 ino + cg * fs->fs_ipg, wkhd); 2510 bdwrite(bp); 2511 return (0); 2512 } 2513 2514 /* 2515 * Check to see if a file is free. 2516 */ 2517 int 2518 ffs_checkfreefile(fs, devvp, ino) 2519 struct fs *fs; 2520 struct vnode *devvp; 2521 ino_t ino; 2522 { 2523 struct cg *cgp; 2524 struct buf *bp; 2525 ufs2_daddr_t cgbno; 2526 int ret; 2527 u_int cg; 2528 u_int8_t *inosused; 2529 2530 cg = ino_to_cg(fs, ino); 2531 if (devvp->v_type == VREG) { 2532 /* devvp is a snapshot */ 2533 cgbno = fragstoblks(fs, cgtod(fs, cg)); 2534 } else { 2535 /* devvp is a normal disk device */ 2536 cgbno = fsbtodb(fs, cgtod(fs, cg)); 2537 } 2538 if (ino >= fs->fs_ipg * fs->fs_ncg) 2539 return (1); 2540 if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) { 2541 brelse(bp); 2542 return (1); 2543 } 2544 cgp = (struct cg *)bp->b_data; 2545 if (!cg_chkmagic(cgp)) { 2546 brelse(bp); 2547 return (1); 2548 } 2549 inosused = cg_inosused(cgp); 2550 ino %= fs->fs_ipg; 2551 ret = isclr(inosused, ino); 2552 brelse(bp); 2553 return (ret); 2554 } 2555 2556 /* 2557 * Find a block of the specified size in the specified cylinder group. 2558 * 2559 * It is a panic if a request is made to find a block if none are 2560 * available. 
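 * The free map is first scanned a byte at a time with scanc() and the
 * precomputed fragtbl[] to find a map byte containing a run of at
 * least allocsiz free fragments; the around[] and inside[] masks are
 * then used to locate the exact starting fragment within that byte.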
2561 */ 2562 static ufs1_daddr_t 2563 ffs_mapsearch(fs, cgp, bpref, allocsiz) 2564 struct fs *fs; 2565 struct cg *cgp; 2566 ufs2_daddr_t bpref; 2567 int allocsiz; 2568 { 2569 ufs1_daddr_t bno; 2570 int start, len, loc, i; 2571 int blk, field, subfield, pos; 2572 u_int8_t *blksfree; 2573 2574 /* 2575 * find the fragment by searching through the free block 2576 * map for an appropriate bit pattern 2577 */ 2578 if (bpref) 2579 start = dtogd(fs, bpref) / NBBY; 2580 else 2581 start = cgp->cg_frotor / NBBY; 2582 blksfree = cg_blksfree(cgp); 2583 len = howmany(fs->fs_fpg, NBBY) - start; 2584 loc = scanc((u_int)len, (u_char *)&blksfree[start], 2585 fragtbl[fs->fs_frag], 2586 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 2587 if (loc == 0) { 2588 len = start + 1; 2589 start = 0; 2590 loc = scanc((u_int)len, (u_char *)&blksfree[0], 2591 fragtbl[fs->fs_frag], 2592 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 2593 if (loc == 0) { 2594 printf("start = %d, len = %d, fs = %s\n", 2595 start, len, fs->fs_fsmnt); 2596 panic("ffs_alloccg: map corrupted"); 2597 /* NOTREACHED */ 2598 } 2599 } 2600 bno = (start + len - loc) * NBBY; 2601 cgp->cg_frotor = bno; 2602 /* 2603 * found the byte in the map 2604 * sift through the bits to find the selected frag 2605 */ 2606 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) { 2607 blk = blkmap(fs, blksfree, bno); 2608 blk <<= 1; 2609 field = around[allocsiz]; 2610 subfield = inside[allocsiz]; 2611 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { 2612 if ((blk & field) == subfield) 2613 return (bno + pos); 2614 field <<= 1; 2615 subfield <<= 1; 2616 } 2617 } 2618 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt); 2619 panic("ffs_alloccg: block not in map"); 2620 return (-1); 2621 } 2622 2623 /* 2624 * Fserr prints the name of a filesystem with an error diagnostic. 2625 * 2626 * The form of the error message is: 2627 * fs: error message 2628 */ 2629 void 2630 ffs_fserr(fs, inum, cp) 2631 struct fs *fs; 2632 ino_t inum; 2633 char *cp; 2634 { 2635 struct thread *td = curthread; /* XXX */ 2636 struct proc *p = td->td_proc; 2637 2638 log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n", 2639 p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum, 2640 fs->fs_fsmnt, cp); 2641 } 2642 2643 /* 2644 * This function provides the capability for the fsck program to 2645 * update an active filesystem. Fourteen operations are provided: 2646 * 2647 * adjrefcnt(inode, amt) - adjusts the reference count on the 2648 * specified inode by the specified amount. Under normal 2649 * operation the count should always go down. Decrementing 2650 * the count to zero will cause the inode to be freed. 2651 * adjblkcnt(inode, amt) - adjust the number of blocks used by the 2652 * inode by the specified amount. 2653 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) - 2654 * adjust the superblock summary. 2655 * freedirs(inode, count) - directory inodes [inode..inode + count - 1] 2656 * are marked as free. Inodes should never have to be marked 2657 * as in use. 2658 * freefiles(inode, count) - file inodes [inode..inode + count - 1] 2659 * are marked as free. Inodes should never have to be marked 2660 * as in use. 2661 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1] 2662 * are marked as free. Blocks should never have to be marked 2663 * as in use. 2664 * setflags(flags, set/clear) - the fs_flags field has the specified 2665 * flags set (second parameter +1) or cleared (second parameter -1). 
2666 * setcwd(dirinode) - set the current directory to dirinode in the 2667 * filesystem associated with the snapshot. 2668 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".." 2669 * in the current directory is oldvalue then change it to newvalue. 2670 * unlink(nameptr, oldvalue) - Verify that the inode number associated 2671 * with nameptr in the current directory is oldvalue then unlink it. 2672 * 2673 * The following functions may only be used on a quiescent filesystem 2674 * by the soft updates journal. They are not safe to be run on an active 2675 * filesystem. 2676 * 2677 * setinode(inode, dip) - the specified disk inode is replaced with the 2678 * contents pointed to by dip. 2679 * setbufoutput(fd, flags) - output associated with the specified file 2680 * descriptor (which must reference the character device supporting 2681 * the filesystem) switches from using physio to running through the 2682 * buffer cache when flags is set to 1. The descriptor reverts to 2683 * physio for output when flags is set to zero. 2684 */ 2685 2686 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS); 2687 2688 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT, 2689 0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count"); 2690 2691 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR, 2692 sysctl_ffs_fsck, "Adjust Inode Used Blocks Count"); 2693 2694 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR, 2695 sysctl_ffs_fsck, "Adjust number of directories"); 2696 2697 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR, 2698 sysctl_ffs_fsck, "Adjust number of free blocks"); 2699 2700 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR, 2701 sysctl_ffs_fsck, "Adjust number of free inodes"); 2702 2703 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR, 2704 sysctl_ffs_fsck, "Adjust number of free frags"); 2705 2706 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR, 2707 sysctl_ffs_fsck, "Adjust number of free clusters"); 2708 2709 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR, 2710 sysctl_ffs_fsck, "Free Range of Directory Inodes"); 2711 2712 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR, 2713 sysctl_ffs_fsck, "Free Range of File Inodes"); 2714 2715 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR, 2716 sysctl_ffs_fsck, "Free Range of Blocks"); 2717 2718 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR, 2719 sysctl_ffs_fsck, "Change Filesystem Flags"); 2720 2721 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR, 2722 sysctl_ffs_fsck, "Set Current Working Directory"); 2723 2724 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR, 2725 sysctl_ffs_fsck, "Change Value of .. 
Entry"); 2726 2727 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR, 2728 sysctl_ffs_fsck, "Unlink a Duplicate Name"); 2729 2730 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR, 2731 sysctl_ffs_fsck, "Update an On-Disk Inode"); 2732 2733 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR, 2734 sysctl_ffs_fsck, "Set Buffered Writing for Descriptor"); 2735 2736 #define DEBUG 1 2737 #ifdef DEBUG 2738 static int fsckcmds = 0; 2739 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, ""); 2740 #endif /* DEBUG */ 2741 2742 static int buffered_write(struct file *, struct uio *, struct ucred *, 2743 int, struct thread *); 2744 2745 static int 2746 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS) 2747 { 2748 struct thread *td = curthread; 2749 struct fsck_cmd cmd; 2750 struct ufsmount *ump; 2751 struct vnode *vp, *dvp, *fdvp; 2752 struct inode *ip, *dp; 2753 struct mount *mp; 2754 struct fs *fs; 2755 ufs2_daddr_t blkno; 2756 long blkcnt, blksize; 2757 struct file *fp, *vfp; 2758 cap_rights_t rights; 2759 int filetype, error; 2760 static struct fileops *origops, bufferedops; 2761 2762 if (req->newlen > sizeof cmd) 2763 return (EBADRPC); 2764 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0) 2765 return (error); 2766 if (cmd.version != FFS_CMD_VERSION) 2767 return (ERPCMISMATCH); 2768 if ((error = getvnode(td, cmd.handle, 2769 cap_rights_init(&rights, CAP_FSCK), &fp)) != 0) 2770 return (error); 2771 vp = fp->f_data; 2772 if (vp->v_type != VREG && vp->v_type != VDIR) { 2773 fdrop(fp, td); 2774 return (EINVAL); 2775 } 2776 vn_start_write(vp, &mp, V_WAIT); 2777 if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) { 2778 vn_finished_write(mp); 2779 fdrop(fp, td); 2780 return (EINVAL); 2781 } 2782 ump = VFSTOUFS(mp); 2783 if ((mp->mnt_flag & MNT_RDONLY) && 2784 ump->um_fsckpid != td->td_proc->p_pid) { 2785 vn_finished_write(mp); 2786 fdrop(fp, td); 2787 return (EROFS); 2788 } 2789 fs = ump->um_fs; 2790 filetype = IFREG; 2791 2792 switch (oidp->oid_number) { 2793 2794 case FFS_SET_FLAGS: 2795 #ifdef DEBUG 2796 if (fsckcmds) 2797 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname, 2798 cmd.size > 0 ? 
"set" : "clear"); 2799 #endif /* DEBUG */ 2800 if (cmd.size > 0) 2801 fs->fs_flags |= (long)cmd.value; 2802 else 2803 fs->fs_flags &= ~(long)cmd.value; 2804 break; 2805 2806 case FFS_ADJ_REFCNT: 2807 #ifdef DEBUG 2808 if (fsckcmds) { 2809 printf("%s: adjust inode %jd link count by %jd\n", 2810 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2811 (intmax_t)cmd.size); 2812 } 2813 #endif /* DEBUG */ 2814 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2815 break; 2816 ip = VTOI(vp); 2817 ip->i_nlink += cmd.size; 2818 DIP_SET(ip, i_nlink, ip->i_nlink); 2819 ip->i_effnlink += cmd.size; 2820 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2821 error = ffs_update(vp, 1); 2822 if (DOINGSOFTDEP(vp)) 2823 softdep_change_linkcnt(ip); 2824 vput(vp); 2825 break; 2826 2827 case FFS_ADJ_BLKCNT: 2828 #ifdef DEBUG 2829 if (fsckcmds) { 2830 printf("%s: adjust inode %jd block count by %jd\n", 2831 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2832 (intmax_t)cmd.size); 2833 } 2834 #endif /* DEBUG */ 2835 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2836 break; 2837 ip = VTOI(vp); 2838 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size); 2839 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2840 error = ffs_update(vp, 1); 2841 vput(vp); 2842 break; 2843 2844 case FFS_DIR_FREE: 2845 filetype = IFDIR; 2846 /* fall through */ 2847 2848 case FFS_FILE_FREE: 2849 #ifdef DEBUG 2850 if (fsckcmds) { 2851 if (cmd.size == 1) 2852 printf("%s: free %s inode %ju\n", 2853 mp->mnt_stat.f_mntonname, 2854 filetype == IFDIR ? "directory" : "file", 2855 (uintmax_t)cmd.value); 2856 else 2857 printf("%s: free %s inodes %ju-%ju\n", 2858 mp->mnt_stat.f_mntonname, 2859 filetype == IFDIR ? "directory" : "file", 2860 (uintmax_t)cmd.value, 2861 (uintmax_t)(cmd.value + cmd.size - 1)); 2862 } 2863 #endif /* DEBUG */ 2864 while (cmd.size > 0) { 2865 if ((error = ffs_freefile(ump, fs, ump->um_devvp, 2866 cmd.value, filetype, NULL))) 2867 break; 2868 cmd.size -= 1; 2869 cmd.value += 1; 2870 } 2871 break; 2872 2873 case FFS_BLK_FREE: 2874 #ifdef DEBUG 2875 if (fsckcmds) { 2876 if (cmd.size == 1) 2877 printf("%s: free block %jd\n", 2878 mp->mnt_stat.f_mntonname, 2879 (intmax_t)cmd.value); 2880 else 2881 printf("%s: free blocks %jd-%jd\n", 2882 mp->mnt_stat.f_mntonname, 2883 (intmax_t)cmd.value, 2884 (intmax_t)cmd.value + cmd.size - 1); 2885 } 2886 #endif /* DEBUG */ 2887 blkno = cmd.value; 2888 blkcnt = cmd.size; 2889 blksize = fs->fs_frag - (blkno % fs->fs_frag); 2890 while (blkcnt > 0) { 2891 if (blksize > blkcnt) 2892 blksize = blkcnt; 2893 ffs_blkfree(ump, fs, ump->um_devvp, blkno, 2894 blksize * fs->fs_fsize, ROOTINO, VDIR, NULL); 2895 blkno += blksize; 2896 blkcnt -= blksize; 2897 blksize = fs->fs_frag; 2898 } 2899 break; 2900 2901 /* 2902 * Adjust superblock summaries. fsck(8) is expected to 2903 * submit deltas when necessary. 
2904 */ 2905 case FFS_ADJ_NDIR: 2906 #ifdef DEBUG 2907 if (fsckcmds) { 2908 printf("%s: adjust number of directories by %jd\n", 2909 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2910 } 2911 #endif /* DEBUG */ 2912 fs->fs_cstotal.cs_ndir += cmd.value; 2913 break; 2914 2915 case FFS_ADJ_NBFREE: 2916 #ifdef DEBUG 2917 if (fsckcmds) { 2918 printf("%s: adjust number of free blocks by %+jd\n", 2919 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2920 } 2921 #endif /* DEBUG */ 2922 fs->fs_cstotal.cs_nbfree += cmd.value; 2923 break; 2924 2925 case FFS_ADJ_NIFREE: 2926 #ifdef DEBUG 2927 if (fsckcmds) { 2928 printf("%s: adjust number of free inodes by %+jd\n", 2929 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2930 } 2931 #endif /* DEBUG */ 2932 fs->fs_cstotal.cs_nifree += cmd.value; 2933 break; 2934 2935 case FFS_ADJ_NFFREE: 2936 #ifdef DEBUG 2937 if (fsckcmds) { 2938 printf("%s: adjust number of free frags by %+jd\n", 2939 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2940 } 2941 #endif /* DEBUG */ 2942 fs->fs_cstotal.cs_nffree += cmd.value; 2943 break; 2944 2945 case FFS_ADJ_NUMCLUSTERS: 2946 #ifdef DEBUG 2947 if (fsckcmds) { 2948 printf("%s: adjust number of free clusters by %+jd\n", 2949 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2950 } 2951 #endif /* DEBUG */ 2952 fs->fs_cstotal.cs_numclusters += cmd.value; 2953 break; 2954 2955 case FFS_SET_CWD: 2956 #ifdef DEBUG 2957 if (fsckcmds) { 2958 printf("%s: set current directory to inode %jd\n", 2959 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2960 } 2961 #endif /* DEBUG */ 2962 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp))) 2963 break; 2964 AUDIT_ARG_VNODE1(vp); 2965 if ((error = change_dir(vp, td)) != 0) { 2966 vput(vp); 2967 break; 2968 } 2969 VOP_UNLOCK(vp, 0); 2970 pwd_chdir(td, vp); 2971 break; 2972 2973 case FFS_SET_DOTDOT: 2974 #ifdef DEBUG 2975 if (fsckcmds) { 2976 printf("%s: change .. in cwd from %jd to %jd\n", 2977 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2978 (intmax_t)cmd.size); 2979 } 2980 #endif /* DEBUG */ 2981 /* 2982 * First we have to get and lock the parent directory 2983 * to which ".." points. 2984 */ 2985 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp); 2986 if (error) 2987 break; 2988 /* 2989 * Now we get and lock the child directory containing "..". 2990 */ 2991 FILEDESC_SLOCK(td->td_proc->p_fd); 2992 dvp = td->td_proc->p_fd->fd_cdir; 2993 FILEDESC_SUNLOCK(td->td_proc->p_fd); 2994 if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) { 2995 vput(fdvp); 2996 break; 2997 } 2998 dp = VTOI(dvp); 2999 dp->i_offset = 12; /* XXX mastertemplate.dot_reclen */ 3000 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size, 3001 DT_DIR, 0); 3002 cache_purge(fdvp); 3003 cache_purge(dvp); 3004 vput(dvp); 3005 vput(fdvp); 3006 break; 3007 3008 case FFS_UNLINK: 3009 #ifdef DEBUG 3010 if (fsckcmds) { 3011 char buf[32]; 3012 3013 if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL)) 3014 strncpy(buf, "Name_too_long", 32); 3015 printf("%s: unlink %s (inode %jd)\n", 3016 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size); 3017 } 3018 #endif /* DEBUG */ 3019 /* 3020 * kern_unlinkat will do its own start/finish writes and 3021 * they do not nest, so drop ours here. Setting mp == NULL 3022 * indicates that vn_finished_write is not needed down below. 
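 * vn_finished_write() tolerates a NULL mount pointer, so the common
 * exit path at the bottom of this function remains correct.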
3023 */ 3024 vn_finished_write(mp); 3025 mp = NULL; 3026 error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value, 3027 UIO_USERSPACE, (ino_t)cmd.size); 3028 break; 3029 3030 case FFS_SET_INODE: 3031 if (ump->um_fsckpid != td->td_proc->p_pid) { 3032 error = EPERM; 3033 break; 3034 } 3035 #ifdef DEBUG 3036 if (fsckcmds) { 3037 printf("%s: update inode %jd\n", 3038 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 3039 } 3040 #endif /* DEBUG */ 3041 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 3042 break; 3043 AUDIT_ARG_VNODE1(vp); 3044 ip = VTOI(vp); 3045 if (ip->i_ump->um_fstype == UFS1) 3046 error = copyin((void *)(intptr_t)cmd.size, ip->i_din1, 3047 sizeof(struct ufs1_dinode)); 3048 else 3049 error = copyin((void *)(intptr_t)cmd.size, ip->i_din2, 3050 sizeof(struct ufs2_dinode)); 3051 if (error) { 3052 vput(vp); 3053 break; 3054 } 3055 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 3056 error = ffs_update(vp, 1); 3057 vput(vp); 3058 break; 3059 3060 case FFS_SET_BUFOUTPUT: 3061 if (ump->um_fsckpid != td->td_proc->p_pid) { 3062 error = EPERM; 3063 break; 3064 } 3065 if (VTOI(vp)->i_ump != ump) { 3066 error = EINVAL; 3067 break; 3068 } 3069 #ifdef DEBUG 3070 if (fsckcmds) { 3071 printf("%s: %s buffered output for descriptor %jd\n", 3072 mp->mnt_stat.f_mntonname, 3073 cmd.size == 1 ? "enable" : "disable", 3074 (intmax_t)cmd.value); 3075 } 3076 #endif /* DEBUG */ 3077 if ((error = getvnode(td, cmd.value, 3078 cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0) 3079 break; 3080 if (vfp->f_vnode->v_type != VCHR) { 3081 fdrop(vfp, td); 3082 error = EINVAL; 3083 break; 3084 } 3085 if (origops == NULL) { 3086 origops = vfp->f_ops; 3087 bcopy((void *)origops, (void *)&bufferedops, 3088 sizeof(bufferedops)); 3089 bufferedops.fo_write = buffered_write; 3090 } 3091 if (cmd.size == 1) 3092 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops, 3093 (uintptr_t)&bufferedops); 3094 else 3095 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops, 3096 (uintptr_t)origops); 3097 fdrop(vfp, td); 3098 break; 3099 3100 default: 3101 #ifdef DEBUG 3102 if (fsckcmds) { 3103 printf("Invalid request %d from fsck\n", 3104 oidp->oid_number); 3105 } 3106 #endif /* DEBUG */ 3107 error = EINVAL; 3108 break; 3109 3110 } 3111 fdrop(fp, td); 3112 vn_finished_write(mp); 3113 return (error); 3114 } 3115 3116 /* 3117 * Function to switch a descriptor to use the buffer cache to stage 3118 * its I/O. This is needed so that writes to the filesystem device 3119 * will give snapshots a chance to copy modified blocks for which it 3120 * needs to retain copies. 3121 */ 3122 static int 3123 buffered_write(fp, uio, active_cred, flags, td) 3124 struct file *fp; 3125 struct uio *uio; 3126 struct ucred *active_cred; 3127 int flags; 3128 struct thread *td; 3129 { 3130 struct vnode *devvp, *vp; 3131 struct inode *ip; 3132 struct buf *bp; 3133 struct fs *fs; 3134 struct filedesc *fdp; 3135 int error; 3136 daddr_t lbn; 3137 3138 /* 3139 * The devvp is associated with the /dev filesystem. To discover 3140 * the filesystem with which the device is associated, we depend 3141 * on the application setting the current directory to a location 3142 * within the filesystem being written. Yes, this is an ugly hack. 
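 * The FFS_SET_CWD operation above gives the caller a way to do
 * exactly that before it switches a descriptor to buffered output.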
3143 */ 3144 devvp = fp->f_vnode; 3145 if (!vn_isdisk(devvp, NULL)) 3146 return (EINVAL); 3147 fdp = td->td_proc->p_fd; 3148 FILEDESC_SLOCK(fdp); 3149 vp = fdp->fd_cdir; 3150 vref(vp); 3151 FILEDESC_SUNLOCK(fdp); 3152 vn_lock(vp, LK_SHARED | LK_RETRY); 3153 /* 3154 * Check that the current directory vnode indeed belongs to 3155 * UFS before trying to dereference UFS-specific v_data fields. 3156 */ 3157 if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) { 3158 vput(vp); 3159 return (EINVAL); 3160 } 3161 ip = VTOI(vp); 3162 if (ip->i_devvp != devvp) { 3163 vput(vp); 3164 return (EINVAL); 3165 } 3166 fs = ip->i_fs; 3167 vput(vp); 3168 foffset_lock_uio(fp, uio, flags); 3169 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 3170 #ifdef DEBUG 3171 if (fsckcmds) { 3172 printf("%s: buffered write for block %jd\n", 3173 fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset)); 3174 } 3175 #endif /* DEBUG */ 3176 /* 3177 * All I/O must be contained within a filesystem block, start on 3178 * a fragment boundary, and be a multiple of fragments in length. 3179 */ 3180 if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) || 3181 fragoff(fs, uio->uio_offset) != 0 || 3182 fragoff(fs, uio->uio_resid) != 0) { 3183 error = EINVAL; 3184 goto out; 3185 } 3186 lbn = numfrags(fs, uio->uio_offset); 3187 bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0); 3188 bp->b_flags |= B_RELBUF; 3189 if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) { 3190 brelse(bp); 3191 goto out; 3192 } 3193 error = bwrite(bp); 3194 out: 3195 VOP_UNLOCK(devvp, 0); 3196 foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF); 3197 return (error); 3198 } 3199
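/*
 * Rough sketch of the user-space sequence expected for
 * FFS_SET_BUFOUTPUT (illustrative only; struct fsck_cmd,
 * FFS_CMD_VERSION, and the sysctl name come from the interface above,
 * while the descriptor names and the lack of error handling are
 * assumptions):
 *
 *	struct fsck_cmd cmd = { .version = FFS_CMD_VERSION };
 *	cmd.handle = fsfd;	descriptor on a file in the mounted fs
 *	cmd.value = devfd;	descriptor open on the backing device
 *	cmd.size = 1;		1 = stage writes through the buffer cache,
 *				0 = revert to physio
 *	sysctlbyname("vfs.ffs.setbufoutput", NULL, NULL, &cmd, sizeof(cmd));
 *
 * The request is honored only when the calling process has registered
 * as the active fsck for the mount (ump->um_fsckpid).
 */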