/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capability.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				  int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t	ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
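/*
 * A minimal sketch (illustrative only, not from the original source) of
 * the expected calling convention for ffs_alloc() below, assuming a
 * caller such as ffs_balloc() that already holds the per-mount UFS
 * mutex; "pref", "indx", and "newb" are hypothetical locals:
 *
 *	UFS_LOCK(ump);
 *	pref = ffs_blkpref_ufs2(ip, lbn, indx, &ip->i_din2->di_db[0]);
 *	error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, flags,
 *	    cred, &newb);
 *	// Per the ffs_hashalloc() contract, ffs_alloc() returns with the
 *	// UFS mutex released; on success *bnp holds the block number.
 */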
/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error, gbflags;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	*bpp = 0;
	vp = ITOV(ip);
	fs = ip->i_fs;
	bp = NULL;
	ump = ip->i_ump;
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * no space available
	 */
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * from the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success in doing the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather the previous block
 * allocation will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif
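/*
 * The two knobs above are exported as vfs.ffs.doasyncfree and
 * vfs.ffs.doreallocblks, so the behavior can be tuned from userland;
 * for example, block reallocation can be disabled for testing with:
 *
 *	sysctl vfs.ffs.doreallocblks=0
 */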
int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{

	if (doreallocblks == 0)
		return (ENOSPC);
	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here. Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, do not move anything in it. Indirect blocks are
	 * usually initially laid out in a position between the data
	 * blocks. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	ump = pip->i_ump;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of directories created one after another
	 * in the same cylinder group without intervening files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
					(allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random() / 2 + 1;
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ufs_prepare_reclaim(*vpp);
	ip->i_flag = 0;
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		(*vpp)->v_op = &ffs_vnodeops2;
	else
		(*vpp)->v_op = &ffs_vnodeops1;
	return (0);
noinodes:
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Compute the various limits used for the optimal allocation
	 * of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
		    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 *
	 * We are trying to find a suitable cylinder group nearby
	 * our preferred cylinder group to place a new directory.
	 * We scan from our preferred cylinder group forward looking
	 * for a cylinder group that meets our criterion. If we get
	 * to the final cylinder group and do not find anything,
	 * we start scanning backwards from our preferred cylinder
	 * group. The ideal would be to alternate looking forward
	 * and backward, but that is just too complex to code for
	 * the gain it would get. The most likely place where the
	 * backward scan would take effect is when we start near
	 * the end of the filesystem and do not find anything from
	 * where we are to the end. In that case, scanning backward
	 * will likely find us a suitable cylinder group much closer
	 * to our desired location than if we were to start scanning
	 * forward from the beginning of the filesystem.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = prefcg - 1; cg >= 0; cg--)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = prefcg - 1; cg >= 0; cg--)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}
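/*
 * A worked example of the maxcontigdirs computation above (illustrative
 * only; the numbers assume newfs-style defaults of fs_avgfilesize =
 * 16384 and fs_avgfpdir = 64 with fs_bsize = 16384).  Then dirsize =
 * 16384 * 64 = 1MB of expected data per directory, and with an average
 * of 1000 free blocks per cg, maxcontigdirs = min((1000 * 16384) /
 * 1048576, 255) = 15, so at most 15 directories are placed back to back
 * in one cylinder group before the preference moves on.
 */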
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. The first indirect is allocated immediately following the last
 * direct block and the data blocks for the first indirect immediately
 * follow it.
 *
 * If no blocks have been allocated in any other section, the indirect
 * block(s) are allocated in the same cylinder group as its inode in an
 * area reserved immediately following the inode blocks. The policy for
 * the data blocks is to place them in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is laid out
 * contiguously if possible.
 */
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din1->di_db[NDADDR - 1] != 0)
			pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din1->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Same as above, but for UFS2.
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}
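/*
 * A worked example of the search order implemented by ffs_hashalloc()
 * below (illustrative only).  With fs_ncg = 16 and a preferred cylinder
 * group of 5, the quadratic rehash visits groups 6 (i = 1), 8 (i = 2),
 * 12 (i = 4), and 4 (i = 8, wrapped), doubling the step each time.  If
 * all of those fail, the brute-force pass starts at group
 * (5 + 2) % 16 = 7 and walks every remaining group once.
 */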
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held. Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs2_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bprev;
	int osize, nsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int nffree;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i))
			goto fail;
	/*
	 * The current fragment can be extended:
	 * deduct the count on the fragment being extended into,
	 * increase the count on the remaining fragment (if any),
	 * and allocate the extended piece.
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		nffree++;
	}
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= nffree;
	fs->fs_cs(fs, cg).cs_nffree -= nffree;
	fs->fs_fmod = 1;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
		    frags, numfrags(fs, osize));
	bdwrite(bp);
	return (bprev);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);
}
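/*
 * A worked example of the fragment accounting in ffs_fragextend()
 * above (illustrative only, assuming an 8K/1K filesystem, fs_frag = 8).
 * Extending a fragment from osize = 2 frags to nsize = 4 frags at the
 * head of a block whose remaining 6 frags are free clears two bits in
 * cg_blksfree, decrements cg_frsum[6] (the free run that was consumed),
 * increments cg_frsum[4] (the four free frags left behind), and charges
 * the two newly taken frags against cs_nffree.
 */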
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs2_daddr_t
ffs_alloccg(ip, cg, bpref, size, rsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int size;
	int rsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	int i, allocsiz, error, frags;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	/*
	 * Check to see if any fragments are already available.
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary.
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * No fragments were available, so a block will be
		 * allocated, and hacked up.
		 */
		if (cgp->cg_cs.cs_nbfree == 0)
			goto fail;
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	KASSERT(size == rsize,
	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0)
		goto fail;
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cg) + bno;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
	bdwrite(bp);
	return (blkno);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);
}
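/*
 * A worked example of the cg_frsum best-fit search in ffs_alloccg()
 * above (illustrative only).  Requesting size = 2 frags when cg_frsum[]
 * records no free runs of exactly 2 or 3 frags but one run of 4, the
 * scan settles on allocsiz = 4; after ffs_mapsearch() locates that run,
 * the bookkeeping is cg_frsum[4]-- and cg_frsum[2]++ for the two frags
 * left over, with cs_nffree charged only for the two frags taken.
 */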
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs2_daddr_t
ffs_alloccgblk(ip, bp, bpref, size)
	struct inode *ip;
	struct buf *bp;
	ufs2_daddr_t bpref;
	int size;
{
	struct fs *fs;
	struct cg *cgp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	u_int8_t *blksfree;
	int i, cgbpref;

	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0) {
		bpref = cgp->cg_rotor;
	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
		/* map bpref to correct zone in this cg */
		if (bpref < cgdata(fs, cgbpref))
			bpref = cgmeta(fs, cgp->cg_cgx);
		else
			bpref = cgdata(fs, cgp->cg_cgx);
	}
	/*
	 * If the requested block is available, use it.
	 */
	bno = dtogd(fs, blknum(fs, bpref));
	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
		goto gotit;
	/*
	 * Take the next available block in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	/* Update cg_rotor only if allocated from the data zone */
	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
		cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cgp->cg_cgx) + bno;
	/*
	 * If the caller didn't want the whole block, free the frags here.
	 */
	size = numfrags(fs, size);
	if (size != fs->fs_frag) {
		bno = dtogd(fs, blkno);
		for (i = size; i < fs->fs_frag; i++)
			setbit(blksfree, bno + i);
		i = fs->fs_frag - size;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
	}
	/* XXX Fixme. */
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
		    size, 0);
	UFS_LOCK(ump);
	return (blkno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs2_daddr_t
ffs_clusteralloc(ip, cg, bpref, len, unused)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int len;
	int unused;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int i, run, bit, map, got;
	ufs2_daddr_t bno;
	u_char *mapp;
	int32_t *lp;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	UFS_UNLOCK(ump);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail_lock;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail_lock;
	bp->b_xflags |= BX_BKGRDWRITE;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs2_daddr_t
ffs_clusteralloc(ip, cg, bpref, len, unused)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int len;
	int unused;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int i, run, bit, map, got;
	ufs2_daddr_t bno;
	u_char *mapp;
	int32_t *lp;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	UFS_UNLOCK(ump);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail_lock;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail_lock;
	bp->b_xflags |= BX_BKGRDWRITE;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum-sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		UFS_LOCK(ump);
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need, as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = cgdata(fs, cg);
	else
		bpref = blknum(fs, bpref);
	bpref = fragstoblks(fs, dtogd(fs, bpref));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail_lock;
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	UFS_LOCK(ump);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
			panic("ffs_clusteralloc: lost block");
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	bdwrite(bp);
	return (bno);

fail_lock:
	UFS_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}

static inline struct buf *
getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
{
	struct fs *fs;

	fs = ip->i_fs;
	return (getblk(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs,
	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
	    gbflags));
}
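/*
 * A worked example of the inode addressing used by getinobuf() above
 * (illustrative numbers only).  Inodes are numbered consecutively
 * across cylinder groups, fs_ipg per group, and ino_to_fsba() turns an
 * inode number into the filesystem block holding its on-disk copy.
 * With fs_ipg == 32768 and INOPB(fs) == 64:
 *
 *	cg 2, cginoblk 128  ->  inode number 2 * 32768 + 128 == 65664
 *
 * ino_to_fsba() then selects the inode block containing inodes
 * 65664..65727, which getblk() maps as one fs_bsize buffer.
 */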
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ufs2_daddr_t
ffs_nodealloccg(ip, cg, ipref, mode, unused)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t ipref;
	int mode;
	int unused;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp, *ibp;
	struct ufsmount *ump;
	u_int8_t *inosused, *loc;
	struct ufs2_dinode *dp2;
	int error, start, len, i;
	u_int32_t old_initediblk;

	fs = ip->i_fs;
	ump = ip->i_ump;
check_nifree:
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		UFS_LOCK(ump);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
restart:
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		UFS_LOCK(ump);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	inosused = cg_inosused(cgp);
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(inosused, ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = memcchr(&inosused[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&inosused[start], 0xff, len);
		if (loc == NULL) {
			printf("cg = %d, irotor = %ld, fs = %s\n",
			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
gotit:
	/*
	 * Check to see if we need to initialize more inodes.
	 */
	if (fs->fs_magic == FS_UFS2_MAGIC &&
	    ipref + INOPB(fs) > cgp->cg_initediblk &&
	    cgp->cg_initediblk < cgp->cg_niblk) {
		old_initediblk = cgp->cg_initediblk;

		/*
		 * Release the cylinder group lock before writing the
		 * initialized inode block.  Entering babarrierwrite()
		 * while holding the cylinder group lock causes a lock
		 * order violation between that lock and snaplk.
		 *
		 * Another thread can decide to initialize the same
		 * inode block, but whichever thread first gets the
		 * cylinder group lock after writing the newly
		 * allocated inode block will update it and the other
		 * will realize that it has lost and leave the
		 * cylinder group unchanged.
		 */
		ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
		brelse(bp);
		if (ibp == NULL) {
			/*
			 * The inode block buffer is already owned by
			 * another thread, which must initialize it.
			 * Wait on the buffer to allow the other thread
			 * to finish the updates, with dropped cg
			 * buffer lock, then retry.
			 */
			ibp = getinobuf(ip, cg, old_initediblk, 0);
			brelse(ibp);
			UFS_LOCK(ump);
			goto check_nifree;
		}
		bzero(ibp->b_data, (int)fs->fs_bsize);
		dp2 = (struct ufs2_dinode *)(ibp->b_data);
		for (i = 0; i < INOPB(fs); i++) {
			dp2->di_gen = arc4random() / 2 + 1;
			dp2++;
		}
		/*
		 * Rather than adding a soft updates dependency to ensure
		 * that the new inode block is written before it is claimed
		 * by the cylinder group map, we just do a barrier write
		 * here. The barrier write will ensure that the inode block
		 * gets written before the updated cylinder group map can be
		 * written. The barrier write should only slow down bulk
		 * loading of newly created filesystems.
		 */
		babarrierwrite(ibp);

		/*
		 * After the inode block is written, try to update the
		 * cg initediblk pointer.  If another thread beat us
		 * to it, then leave it unchanged as the other thread
		 * has already set it correctly.
		 */
		error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		    (int)fs->fs_cgsize, NOCRED, &bp);
		UFS_LOCK(ump);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		if (error != 0) {
			/*
			 * Fail as every other path does: return 0 with
			 * the UFS lock held.  Returning a positive errno
			 * here would be misread as an inode number by
			 * ffs_hashalloc().
			 */
			brelse(bp);
			UFS_LOCK(ump);
			return (0);
		}
		cgp = (struct cg *)bp->b_data;
		if (cgp->cg_initediblk == old_initediblk)
			cgp->cg_initediblk += INOPB(fs);
		goto restart;
	}
	cgp->cg_old_time = cgp->cg_time = time_second;
	cgp->cg_irotor = ipref;
	UFS_LOCK(ump);
	ACTIVECLEAR(fs, cg);
	setbit(inosused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
	bdwrite(bp);
	return ((ino_t)(cg * fs->fs_ipg + ipref));
}
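/*
 * The initediblk protocol above is easiest to follow as a timeline.
 * This sketch walks the race between two threads, A and B, that both
 * notice the same uninitialized inode block:
 *
 *	A: getinobuf(GB_LOCK_NOWAIT) succeeds, B's fails; B sleeps in
 *	   the blocking getinobuf() until A is done, then retries from
 *	   check_nifree.
 *	A: zeroes the block, seeds di_gen, babarrierwrite().
 *	A: re-reads the cg; cg_initediblk still equals old_initediblk,
 *	   so A advances it by INOPB(fs) and restarts its own search.
 *
 * Had a third thread already advanced cg_initediblk, the comparison
 * against old_initediblk would fail and the cg would be left alone,
 * which is exactly the "realize that it has lost" case above.
 */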
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
static void
ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
	struct ufsmount *ump;
	struct fs *fs;
	struct vnode *devvp;
	ufs2_daddr_t bno;
	long size;
	ino_t inum;
	struct workhead *dephd;
{
	struct mount *mp;
	struct cg *cgp;
	struct buf *bp;
	ufs1_daddr_t fragno, cgbno;
	ufs2_daddr_t cgblkno;
	int i, blk, frags, bbase;
	u_int cg;
	u_int8_t *blksfree;
	struct cdev *dev;

	cg = dtog(fs, bno);
	if (devvp->v_type == VREG) {
		/* devvp is a snapshot */
		dev = VTOI(devvp)->i_devvp->v_rdev;
		cgblkno = fragstoblks(fs, cgtod(fs, cg));
	} else {
		/* devvp is a normal disk device */
		dev = devvp->v_rdev;
		cgblkno = fsbtodb(fs, cgtod(fs, cg));
		ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
	}
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
		    size, fs->fs_fsmnt);
		panic("ffs_blkfree_cg: bad size");
	}
#endif
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %jd, ino %lu\n", (intmax_t)bno,
		    (u_long)inum);
		ffs_fserr(fs, inum, "bad block");
		return;
	}
	if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	cgbno = dtogd(fs, bno);
	blksfree = cg_blksfree(cgp);
	UFS_LOCK(ump);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
			if (devvp->v_type == VREG) {
				UFS_UNLOCK(ump);
				/* devvp is a snapshot */
				brelse(bp);
				return;
			}
			printf("dev = %s, block = %jd, fs = %s\n",
			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
			panic("ffs_blkfree_cg: freeing free block");
		}
		ffs_setblock(fs, blksfree, fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * Decrement the counts associated with the old frags.
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * Deallocate the fragment.
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(blksfree, cgbno + i)) {
				printf("dev = %s, block = %jd, fs = %s\n",
				    devtoname(dev), (intmax_t)(bno + i),
				    fs->fs_fsmnt);
				panic("ffs_blkfree_cg: freeing free frag");
			}
			setbit(blksfree, cgbno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * Add back in counts associated with the new frags.
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * If a complete block has been reassembled, account for it.
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, fragno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	mp = UFSTOVFS(ump);
	if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG)
		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
		    numfrags(fs, size), dephd);
	bdwrite(bp);
}
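/*
 * A worked example of the reassembly path above, assuming fs_frag == 8
 * (illustrative numbers only).  Freeing 2 fragments adjacent to an
 * existing free run of 6 merges them into a full block, moving 8
 * fragments' worth of free space from the fragment counters to the
 * block counters:
 *
 *	before:	cg_frsum[6] == 1, cs_nffree == 6, cs_nbfree == N
 *	after:	cg_frsum[6] == 0, cs_nffree == 0, cs_nbfree == N + 1
 *
 * ffs_fragacct() does the cg_frsum[] half of this by decoding the
 * block's free bitmap before and after the bits are set.
 */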
panic("ffs_blkfree_cg: freeing free block"); 2147 } 2148 ffs_setblock(fs, blksfree, fragno); 2149 ffs_clusteracct(fs, cgp, fragno, 1); 2150 cgp->cg_cs.cs_nbfree++; 2151 fs->fs_cstotal.cs_nbfree++; 2152 fs->fs_cs(fs, cg).cs_nbfree++; 2153 } else { 2154 bbase = cgbno - fragnum(fs, cgbno); 2155 /* 2156 * decrement the counts associated with the old frags 2157 */ 2158 blk = blkmap(fs, blksfree, bbase); 2159 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 2160 /* 2161 * deallocate the fragment 2162 */ 2163 frags = numfrags(fs, size); 2164 for (i = 0; i < frags; i++) { 2165 if (isset(blksfree, cgbno + i)) { 2166 printf("dev = %s, block = %jd, fs = %s\n", 2167 devtoname(dev), (intmax_t)(bno + i), 2168 fs->fs_fsmnt); 2169 panic("ffs_blkfree_cg: freeing free frag"); 2170 } 2171 setbit(blksfree, cgbno + i); 2172 } 2173 cgp->cg_cs.cs_nffree += i; 2174 fs->fs_cstotal.cs_nffree += i; 2175 fs->fs_cs(fs, cg).cs_nffree += i; 2176 /* 2177 * add back in counts associated with the new frags 2178 */ 2179 blk = blkmap(fs, blksfree, bbase); 2180 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 2181 /* 2182 * if a complete block has been reassembled, account for it 2183 */ 2184 fragno = fragstoblks(fs, bbase); 2185 if (ffs_isblock(fs, blksfree, fragno)) { 2186 cgp->cg_cs.cs_nffree -= fs->fs_frag; 2187 fs->fs_cstotal.cs_nffree -= fs->fs_frag; 2188 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; 2189 ffs_clusteracct(fs, cgp, fragno, 1); 2190 cgp->cg_cs.cs_nbfree++; 2191 fs->fs_cstotal.cs_nbfree++; 2192 fs->fs_cs(fs, cg).cs_nbfree++; 2193 } 2194 } 2195 fs->fs_fmod = 1; 2196 ACTIVECLEAR(fs, cg); 2197 UFS_UNLOCK(ump); 2198 mp = UFSTOVFS(ump); 2199 if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG) 2200 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno, 2201 numfrags(fs, size), dephd); 2202 bdwrite(bp); 2203 } 2204 2205 TASKQUEUE_DEFINE_THREAD(ffs_trim); 2206 2207 struct ffs_blkfree_trim_params { 2208 struct task task; 2209 struct ufsmount *ump; 2210 struct vnode *devvp; 2211 ufs2_daddr_t bno; 2212 long size; 2213 ino_t inum; 2214 struct workhead *pdephd; 2215 struct workhead dephd; 2216 }; 2217 2218 static void 2219 ffs_blkfree_trim_task(ctx, pending) 2220 void *ctx; 2221 int pending; 2222 { 2223 struct ffs_blkfree_trim_params *tp; 2224 2225 tp = ctx; 2226 ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size, 2227 tp->inum, tp->pdephd); 2228 vn_finished_secondary_write(UFSTOVFS(tp->ump)); 2229 free(tp, M_TEMP); 2230 } 2231 2232 static void 2233 ffs_blkfree_trim_completed(bip) 2234 struct bio *bip; 2235 { 2236 struct ffs_blkfree_trim_params *tp; 2237 2238 tp = bip->bio_caller2; 2239 g_destroy_bio(bip); 2240 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp); 2241 taskqueue_enqueue(taskqueue_ffs_trim, &tp->task); 2242 } 2243 2244 void 2245 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd) 2246 struct ufsmount *ump; 2247 struct fs *fs; 2248 struct vnode *devvp; 2249 ufs2_daddr_t bno; 2250 long size; 2251 ino_t inum; 2252 enum vtype vtype; 2253 struct workhead *dephd; 2254 { 2255 struct mount *mp; 2256 struct bio *bip; 2257 struct ffs_blkfree_trim_params *tp; 2258 2259 /* 2260 * Check to see if a snapshot wants to claim the block. 2261 * Check that devvp is a normal disk device, not a snapshot, 2262 * it has a snapshot(s) associated with it, and one of the 2263 * snapshots wants to claim the block. 
void
ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
	struct ufsmount *ump;
	struct fs *fs;
	struct vnode *devvp;
	ufs2_daddr_t bno;
	long size;
	ino_t inum;
	enum vtype vtype;
	struct workhead *dephd;
{
	struct mount *mp;
	struct bio *bip;
	struct ffs_blkfree_trim_params *tp;

	/*
	 * Check to see if a snapshot wants to claim the block.
	 * Check that devvp is a normal disk device, not a snapshot,
	 * that it has one or more snapshots associated with it, and
	 * that one of the snapshots wants to claim the block.
	 */
	if (devvp->v_type != VREG &&
	    (devvp->v_vflag & VV_COPYONWRITE) &&
	    ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
		return;
	}
	/*
	 * Nothing to delay if TRIM is disabled, or the operation is
	 * performed on a snapshot.
	 */
	if (!ump->um_candelete || devvp->v_type == VREG) {
		ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
		return;
	}

	/*
	 * Postpone setting the free bit in the cg bitmap until the
	 * BIO_DELETE is completed.  Otherwise, due to disk queue
	 * reordering, TRIM might be issued after we reuse the block
	 * and write some new data into it.
	 */
	tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
	tp->ump = ump;
	tp->devvp = devvp;
	tp->bno = bno;
	tp->size = size;
	tp->inum = inum;
	if (dephd != NULL) {
		LIST_INIT(&tp->dephd);
		LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
		tp->pdephd = &tp->dephd;
	} else
		tp->pdephd = NULL;

	bip = g_alloc_bio();
	bip->bio_cmd = BIO_DELETE;
	bip->bio_offset = dbtob(fsbtodb(fs, bno));
	bip->bio_done = ffs_blkfree_trim_completed;
	bip->bio_length = size;
	bip->bio_caller2 = tp;

	mp = UFSTOVFS(ump);
	vn_start_secondary_write(NULL, &mp, 0);
	g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
}

#ifdef INVARIANTS
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(ip, bno, size)
	struct inode *ip;
	ufs2_daddr_t bno;
	long size;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	ufs1_daddr_t cgbno;
	int i, error, frags, free;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("bsize = %ld, size = %ld, fs = %s\n",
		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_checkblk: bad size");
	}
	if ((u_int)bno >= fs->fs_size)
		panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		panic("ffs_checkblk: cg bread failed");
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		panic("ffs_checkblk: cg magic mismatch");
	bp->b_xflags |= BX_BKGRDWRITE;
	blksfree = cg_blksfree(cgp);
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(blksfree, cgbno + i))
				free++;
		if (free != 0 && free != frags)
			panic("ffs_checkblk: partially free fragment");
	}
	brelse(bp);
	return (!free);
}
#endif /* INVARIANTS */

/*
 * Free an inode.
 */
int
ffs_vfree(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{
	struct inode *ip;

	if (DOINGSOFTDEP(pvp)) {
		softdep_freefile(pvp, ino, mode);
		return (0);
	}
	ip = VTOI(pvp);
	return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode,
	    NULL));
}
/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 */
int
ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
	struct ufsmount *ump;
	struct fs *fs;
	struct vnode *devvp;
	ino_t ino;
	int mode;
	struct workhead *wkhd;
{
	struct cg *cgp;
	struct buf *bp;
	ufs2_daddr_t cgbno;
	int error;
	u_int cg;
	u_int8_t *inosused;
	struct cdev *dev;

	cg = ino_to_cg(fs, ino);
	if (devvp->v_type == VREG) {
		/* devvp is a snapshot */
		dev = VTOI(devvp)->i_devvp->v_rdev;
		cgbno = fragstoblks(fs, cgtod(fs, cg));
	} else {
		/* devvp is a normal disk device */
		dev = devvp->v_rdev;
		cgbno = fsbtodb(fs, cgtod(fs, cg));
	}
	if (ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
		    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
	if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	ino %= fs->fs_ipg;
	if (isclr(inosused, ino)) {
		printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
		    (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ffs_freefile: freeing free inode");
	}
	clrbit(inosused, ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type != VREG)
		softdep_setup_inofree(UFSTOVFS(ump), bp,
		    ino + cg * fs->fs_ipg, wkhd);
	bdwrite(bp);
	return (0);
}

/*
 * Check to see if a file is free.
 */
int
ffs_checkfreefile(fs, devvp, ino)
	struct fs *fs;
	struct vnode *devvp;
	ino_t ino;
{
	struct cg *cgp;
	struct buf *bp;
	ufs2_daddr_t cgbno;
	int ret;
	u_int cg;
	u_int8_t *inosused;

	cg = ino_to_cg(fs, ino);
	if (devvp->v_type == VREG) {
		/* devvp is a snapshot */
		cgbno = fragstoblks(fs, cgtod(fs, cg));
	} else {
		/* devvp is a normal disk device */
		cgbno = fsbtodb(fs, cgtod(fs, cg));
	}
	if (ino >= fs->fs_ipg * fs->fs_ncg)
		return (1);
	if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) {
		brelse(bp);
		return (1);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (1);
	}
	inosused = cg_inosused(cgp);
	ino %= fs->fs_ipg;
	ret = isclr(inosused, ino);
	brelse(bp);
	return (ret);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static ufs1_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	struct fs *fs;
	struct cg *cgp;
	ufs2_daddr_t bpref;
	int allocsiz;
{
	ufs1_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	u_int8_t *blksfree;

	/*
	 * Find the fragment by searching through the free block
	 * map for an appropriate bit pattern.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	blksfree = cg_blksfree(cgp);
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&blksfree[start],
	    fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&blksfree[0],
		    fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * Found the byte in the map; sift through the bits to
	 * find the selected frag.
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, blksfree, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
void
ffs_fserr(fs, inum, cp)
	struct fs *fs;
	ino_t inum;
	char *cp;
{
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;

	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
	    fs->fs_fsmnt, cp);
}
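/*
 * The scanc() calls in ffs_mapsearch() above test a whole byte of the
 * free-fragment map per iteration.  A worked example, assuming
 * fs_frag == 8 (so the select mask is simply 1 << (allocsiz - 1)):
 *
 *	fragtbl[8][b] has bit (n - 1) set for each maximal run of n
 *	free fragments in byte value b.  The map byte 0x1c (binary
 *	00011100) contains one free run of length 3, so
 *
 *		fragtbl[8][0x1c] == 0x04
 *
 *	and a search for allocsiz == 3 stops at that byte with a
 *	single table lookup.  The around[]/inside[] loop then locates
 *	the run's exact starting fragment within the block.
 *
 * In the ffs_alloccg() path, cg_frsum[allocsiz] guarantees a run of
 * exactly this size exists, so the exact-length match cannot miss.
 */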
/*
 * This function provides the capability for the fsck program to
 * update an active filesystem. Fourteen operations are provided:
 *
 * adjrefcnt(inode, amt) - adjusts the reference count on the
 *	specified inode by the specified amount. Under normal
 *	operation the count should always go down. Decrementing
 *	the count to zero will cause the inode to be freed.
 * adjblkcnt(inode, amt) - adjust the number of blocks used by the
 *	inode by the specified amount.
 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
 *	adjust the superblock summary.
 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
 *	are marked as free. Inodes should never have to be marked
 *	as in use.
 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
 *	are marked as free. Inodes should never have to be marked
 *	as in use.
 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
 *	are marked as free. Blocks should never have to be marked
 *	as in use.
 * setflags(flags, set/clear) - the fs_flags field has the specified
 *	flags set (second parameter +1) or cleared (second parameter -1).
 * setcwd(dirinode) - set the current directory to dirinode in the
 *	filesystem associated with the snapshot.
 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
 *	in the current directory is oldvalue, then change it to newvalue.
 * unlink(nameptr, oldvalue) - Verify that the inode number associated
 *	with nameptr in the current directory is oldvalue, then unlink it.
 *
 * The following functions may only be used on a quiescent filesystem
 * by the soft updates journal. They are not safe to be run on an active
 * filesystem.
 *
 * setinode(inode, dip) - the specified disk inode is replaced with the
 *	contents pointed to by dip.
 * setbufoutput(fd, flags) - output associated with the specified file
 *	descriptor (which must reference the character device supporting
 *	the filesystem) switches from using physio to running through the
 *	buffer cache when flags is set to 1. The descriptor reverts to
 *	physio for output when flags is set to zero.
 */

static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
	sysctl_ffs_fsck, "Adjust number of directories");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
	sysctl_ffs_fsck, "Adjust number of free blocks");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
	sysctl_ffs_fsck, "Adjust number of free inodes");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
	sysctl_ffs_fsck, "Adjust number of free frags");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
	sysctl_ffs_fsck, "Adjust number of free clusters");

static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
	sysctl_ffs_fsck, "Free Range of Directory Inodes");

static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
	sysctl_ffs_fsck, "Free Range of File Inodes");

static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
	sysctl_ffs_fsck, "Free Range of Blocks");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
	sysctl_ffs_fsck, "Change Filesystem Flags");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
	sysctl_ffs_fsck, "Set Current Working Directory");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
	sysctl_ffs_fsck, "Change Value of .. Entry");
Entry"); 2668 2669 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR, 2670 sysctl_ffs_fsck, "Unlink a Duplicate Name"); 2671 2672 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR, 2673 sysctl_ffs_fsck, "Update an On-Disk Inode"); 2674 2675 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR, 2676 sysctl_ffs_fsck, "Set Buffered Writing for Descriptor"); 2677 2678 #define DEBUG 1 2679 #ifdef DEBUG 2680 static int fsckcmds = 0; 2681 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, ""); 2682 #endif /* DEBUG */ 2683 2684 static int buffered_write(struct file *, struct uio *, struct ucred *, 2685 int, struct thread *); 2686 2687 static int 2688 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS) 2689 { 2690 struct thread *td = curthread; 2691 struct fsck_cmd cmd; 2692 struct ufsmount *ump; 2693 struct vnode *vp, *vpold, *dvp, *fdvp; 2694 struct inode *ip, *dp; 2695 struct mount *mp; 2696 struct fs *fs; 2697 ufs2_daddr_t blkno; 2698 long blkcnt, blksize; 2699 struct filedesc *fdp; 2700 struct file *fp, *vfp; 2701 int filetype, error; 2702 static struct fileops *origops, bufferedops; 2703 2704 if (req->newlen > sizeof cmd) 2705 return (EBADRPC); 2706 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0) 2707 return (error); 2708 if (cmd.version != FFS_CMD_VERSION) 2709 return (ERPCMISMATCH); 2710 if ((error = getvnode(td->td_proc->p_fd, cmd.handle, CAP_FSCK, 2711 &fp)) != 0) 2712 return (error); 2713 vp = fp->f_data; 2714 if (vp->v_type != VREG && vp->v_type != VDIR) { 2715 fdrop(fp, td); 2716 return (EINVAL); 2717 } 2718 vn_start_write(vp, &mp, V_WAIT); 2719 if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) { 2720 vn_finished_write(mp); 2721 fdrop(fp, td); 2722 return (EINVAL); 2723 } 2724 ump = VFSTOUFS(mp); 2725 if ((mp->mnt_flag & MNT_RDONLY) && 2726 ump->um_fsckpid != td->td_proc->p_pid) { 2727 vn_finished_write(mp); 2728 fdrop(fp, td); 2729 return (EROFS); 2730 } 2731 fs = ump->um_fs; 2732 filetype = IFREG; 2733 2734 switch (oidp->oid_number) { 2735 2736 case FFS_SET_FLAGS: 2737 #ifdef DEBUG 2738 if (fsckcmds) 2739 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname, 2740 cmd.size > 0 ? 
"set" : "clear"); 2741 #endif /* DEBUG */ 2742 if (cmd.size > 0) 2743 fs->fs_flags |= (long)cmd.value; 2744 else 2745 fs->fs_flags &= ~(long)cmd.value; 2746 break; 2747 2748 case FFS_ADJ_REFCNT: 2749 #ifdef DEBUG 2750 if (fsckcmds) { 2751 printf("%s: adjust inode %jd link count by %jd\n", 2752 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2753 (intmax_t)cmd.size); 2754 } 2755 #endif /* DEBUG */ 2756 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2757 break; 2758 ip = VTOI(vp); 2759 ip->i_nlink += cmd.size; 2760 DIP_SET(ip, i_nlink, ip->i_nlink); 2761 ip->i_effnlink += cmd.size; 2762 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2763 error = ffs_update(vp, 1); 2764 if (DOINGSOFTDEP(vp)) 2765 softdep_change_linkcnt(ip); 2766 vput(vp); 2767 break; 2768 2769 case FFS_ADJ_BLKCNT: 2770 #ifdef DEBUG 2771 if (fsckcmds) { 2772 printf("%s: adjust inode %jd block count by %jd\n", 2773 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2774 (intmax_t)cmd.size); 2775 } 2776 #endif /* DEBUG */ 2777 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2778 break; 2779 ip = VTOI(vp); 2780 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size); 2781 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2782 error = ffs_update(vp, 1); 2783 vput(vp); 2784 break; 2785 2786 case FFS_DIR_FREE: 2787 filetype = IFDIR; 2788 /* fall through */ 2789 2790 case FFS_FILE_FREE: 2791 #ifdef DEBUG 2792 if (fsckcmds) { 2793 if (cmd.size == 1) 2794 printf("%s: free %s inode %ju\n", 2795 mp->mnt_stat.f_mntonname, 2796 filetype == IFDIR ? "directory" : "file", 2797 (uintmax_t)cmd.value); 2798 else 2799 printf("%s: free %s inodes %ju-%ju\n", 2800 mp->mnt_stat.f_mntonname, 2801 filetype == IFDIR ? "directory" : "file", 2802 (uintmax_t)cmd.value, 2803 (uintmax_t)(cmd.value + cmd.size - 1)); 2804 } 2805 #endif /* DEBUG */ 2806 while (cmd.size > 0) { 2807 if ((error = ffs_freefile(ump, fs, ump->um_devvp, 2808 cmd.value, filetype, NULL))) 2809 break; 2810 cmd.size -= 1; 2811 cmd.value += 1; 2812 } 2813 break; 2814 2815 case FFS_BLK_FREE: 2816 #ifdef DEBUG 2817 if (fsckcmds) { 2818 if (cmd.size == 1) 2819 printf("%s: free block %jd\n", 2820 mp->mnt_stat.f_mntonname, 2821 (intmax_t)cmd.value); 2822 else 2823 printf("%s: free blocks %jd-%jd\n", 2824 mp->mnt_stat.f_mntonname, 2825 (intmax_t)cmd.value, 2826 (intmax_t)cmd.value + cmd.size - 1); 2827 } 2828 #endif /* DEBUG */ 2829 blkno = cmd.value; 2830 blkcnt = cmd.size; 2831 blksize = fs->fs_frag - (blkno % fs->fs_frag); 2832 while (blkcnt > 0) { 2833 if (blksize > blkcnt) 2834 blksize = blkcnt; 2835 ffs_blkfree(ump, fs, ump->um_devvp, blkno, 2836 blksize * fs->fs_fsize, ROOTINO, VDIR, NULL); 2837 blkno += blksize; 2838 blkcnt -= blksize; 2839 blksize = fs->fs_frag; 2840 } 2841 break; 2842 2843 /* 2844 * Adjust superblock summaries. fsck(8) is expected to 2845 * submit deltas when necessary. 
	/*
	 * Adjust superblock summaries.  fsck(8) is expected to
	 * submit deltas when necessary.
	 */
	case FFS_ADJ_NDIR:
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: adjust number of directories by %jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		fs->fs_cstotal.cs_ndir += cmd.value;
		break;

	case FFS_ADJ_NBFREE:
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: adjust number of free blocks by %+jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		fs->fs_cstotal.cs_nbfree += cmd.value;
		break;

	case FFS_ADJ_NIFREE:
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: adjust number of free inodes by %+jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		fs->fs_cstotal.cs_nifree += cmd.value;
		break;

	case FFS_ADJ_NFFREE:
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: adjust number of free frags by %+jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		fs->fs_cstotal.cs_nffree += cmd.value;
		break;

	case FFS_ADJ_NUMCLUSTERS:
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: adjust number of free clusters by %+jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		fs->fs_cstotal.cs_numclusters += cmd.value;
		break;

	case FFS_SET_CWD:
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: set current directory to inode %jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
			break;
		AUDIT_ARG_VNODE1(vp);
		if ((error = change_dir(vp, td)) != 0) {
			vput(vp);
			break;
		}
		VOP_UNLOCK(vp, 0);
		fdp = td->td_proc->p_fd;
		FILEDESC_XLOCK(fdp);
		vpold = fdp->fd_cdir;
		fdp->fd_cdir = vp;
		FILEDESC_XUNLOCK(fdp);
		vrele(vpold);
		break;

	case FFS_SET_DOTDOT:
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: change .. in cwd from %jd to %jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
			    (intmax_t)cmd.size);
		}
#endif /* DEBUG */
		/*
		 * First we have to get and lock the parent directory
		 * to which ".." points.
		 */
		error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
		if (error)
			break;
		/*
		 * Now we get and lock the child directory containing "..".
		 */
		FILEDESC_SLOCK(td->td_proc->p_fd);
		dvp = td->td_proc->p_fd->fd_cdir;
		FILEDESC_SUNLOCK(td->td_proc->p_fd);
		if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
			vput(fdvp);
			break;
		}
		dp = VTOI(dvp);
		dp->i_offset = 12;	/* XXX mastertemplate.dot_reclen */
		error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
		    DT_DIR, 0);
		cache_purge(fdvp);
		cache_purge(dvp);
		vput(dvp);
		vput(fdvp);
		break;
	case FFS_UNLINK:
#ifdef DEBUG
		if (fsckcmds) {
			char buf[32];

			if (copyinstr((char *)(intptr_t)cmd.value, buf, 32,
			    NULL))
				strncpy(buf, "Name_too_long", 32);
			printf("%s: unlink %s (inode %jd)\n",
			    mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
		}
#endif /* DEBUG */
		/*
		 * kern_unlinkat will do its own start/finish writes and
		 * they do not nest, so drop ours here.  Setting mp == NULL
		 * indicates that vn_finished_write is not needed down below.
		 */
		vn_finished_write(mp);
		mp = NULL;
		error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
		    UIO_USERSPACE, (ino_t)cmd.size);
		break;

	case FFS_SET_INODE:
		if (ump->um_fsckpid != td->td_proc->p_pid) {
			error = EPERM;
			break;
		}
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: update inode %jd\n",
			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
			break;
		AUDIT_ARG_VNODE1(vp);
		ip = VTOI(vp);
		if (ip->i_ump->um_fstype == UFS1)
			error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
			    sizeof(struct ufs1_dinode));
		else
			error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
			    sizeof(struct ufs2_dinode));
		if (error) {
			vput(vp);
			break;
		}
		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
		error = ffs_update(vp, 1);
		vput(vp);
		break;

	case FFS_SET_BUFOUTPUT:
		if (ump->um_fsckpid != td->td_proc->p_pid) {
			error = EPERM;
			break;
		}
		if (VTOI(vp)->i_ump != ump) {
			error = EINVAL;
			break;
		}
#ifdef DEBUG
		if (fsckcmds) {
			printf("%s: %s buffered output for descriptor %jd\n",
			    mp->mnt_stat.f_mntonname,
			    cmd.size == 1 ? "enable" : "disable",
			    (intmax_t)cmd.value);
		}
#endif /* DEBUG */
		if ((error = getvnode(td->td_proc->p_fd, cmd.value,
		    CAP_FSCK, &vfp)) != 0)
			break;
		if (vfp->f_vnode->v_type != VCHR) {
			fdrop(vfp, td);
			error = EINVAL;
			break;
		}
		if (origops == NULL) {
			origops = vfp->f_ops;
			bcopy((void *)origops, (void *)&bufferedops,
			    sizeof(bufferedops));
			bufferedops.fo_write = buffered_write;
		}
		if (cmd.size == 1)
			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
			    (uintptr_t)&bufferedops);
		else
			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
			    (uintptr_t)origops);
		fdrop(vfp, td);
		break;

	default:
#ifdef DEBUG
		if (fsckcmds) {
			printf("Invalid request %d from fsck\n",
			    oidp->oid_number);
		}
#endif /* DEBUG */
		error = EINVAL;
		break;

	}
	fdrop(fp, td);
	vn_finished_write(mp);
	return (error);
}
/*
 * Function to switch a descriptor to use the buffer cache to stage
 * its I/O. This is needed so that writes to the filesystem device
 * will give snapshots a chance to copy modified blocks for which
 * they need to retain copies.
 */
static int
buffered_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *devvp, *vp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs;
	struct filedesc *fdp;
	int error;
	daddr_t lbn;

	/*
	 * The devvp is associated with the /dev filesystem. To discover
	 * the filesystem with which the device is associated, we depend
	 * on the application setting the current directory to a location
	 * within the filesystem being written. Yes, this is an ugly hack.
	 */
	devvp = fp->f_vnode;
	if (!vn_isdisk(devvp, NULL))
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	vp = fdp->fd_cdir;
	vref(vp);
	FILEDESC_SUNLOCK(fdp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	/*
	 * Check that the current directory vnode indeed belongs to
	 * UFS before trying to dereference UFS-specific v_data fields.
	 */
	if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
		vput(vp);
		return (EINVAL);
	}
	ip = VTOI(vp);
	if (ip->i_devvp != devvp) {
		vput(vp);
		return (EINVAL);
	}
	fs = ip->i_fs;
	vput(vp);
	foffset_lock_uio(fp, uio, flags);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
#ifdef DEBUG
	if (fsckcmds) {
		printf("%s: buffered write for block %jd\n",
		    fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
	}
#endif /* DEBUG */
	/*
	 * All I/O must be contained within a filesystem block, start on
	 * a fragment boundary, and be a multiple of fragments in length.
	 */
	if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
	    fragoff(fs, uio->uio_offset) != 0 ||
	    fragoff(fs, uio->uio_resid) != 0) {
		error = EINVAL;
		goto out;
	}
	lbn = numfrags(fs, uio->uio_offset);
	bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
	bp->b_flags |= B_RELBUF;
	if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
		brelse(bp);
		goto out;
	}
	error = bwrite(bp);
out:
	VOP_UNLOCK(devvp, 0);
	foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
	return (error);
}
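/*
 * A worked example of the alignment check in buffered_write() above,
 * assuming a filesystem with fs_bsize == 16384 and fs_fsize == 2048
 * (illustrative values only):
 *
 *	offset 0,     resid 16384	accepted (one full block)
 *	offset 2048,  resid 4096	accepted (frags 1-2 of a block)
 *	offset 1024,  resid 2048	rejected (not fragment-aligned)
 *	offset 14336, resid 4096	rejected (crosses a block boundary)
 *
 * Writes accepted through this path land in the buffer cache, where
 * the snapshot copy-on-write machinery can intercept them, rather
 * than going straight to the device via physio.
 */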