/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capability.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				  int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t	ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}
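
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * ffs_alloc() is entered with the UFS mutex held and, per the locking
 * contract of ffs_hashalloc() below, returns with it released.  A caller
 * wanting one full block with no placement hint might, with hypothetical
 * values, do:
 *
 *	UFS_LOCK(ump);
 *	error = ffs_alloc(ip, lbn, 0, fs->fs_bsize, flags, cred, &bno);
 *
 * With bpref == 0 the search starts in the inode's own cylinder group,
 * ino_to_cg(fs, ip->i_number), before ffs_hashalloc() falls back to its
 * quadratic-rehash and brute-force passes.
 */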

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	*bpp = 0;
	vp = ITOV(ip);
	fs = ip->i_fs;
	bp = NULL;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread(vp, lbprev, osize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero(bp->b_data + osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero(bp->b_data + osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * no space available
	 */
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}
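
/*
 * Worked example (editor's sketch, hypothetical numbers): with
 * fs_minfree = 8 and fs_dsize = 1,000,000 fragments, FS_OPTSPACE flips
 * to FS_OPTTIME once cs_nffree drops to 1,000,000 * 8 / 200 = 40,000
 * fragments or fewer, i.e. when fragmented free space falls below half
 * of the 8% reserve.  FS_OPTTIME flips back to FS_OPTSPACE once
 * cs_nffree reaches 1,000,000 * (8 - 2) / 100 = 60,000 fragments, i.e.
 * fragmentation within 2% of the reserve, matching the thresholds
 * tested above.
 */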

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * from the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success in doing the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather the previous block
 * allocation will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{

	if (doreallocblks == 0)
		return (ENOSPC);
	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here. Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}
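
/*
 * Illustrative note (editor's sketch, hypothetical values): for a
 * cluster of len = 4 buffers whose old blocks are scattered, a
 * successful search via ffs_hashalloc(..., ffs_clusteralloc) returns
 * newblk, and the loops above free each old block and repoint the
 * buffers at newblk, newblk + fs_frag, newblk + 2 * fs_frag and
 * newblk + 3 * fs_frag, so the four logical blocks become physically
 * contiguous on disk.
 */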

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, do not move anything in it. Indirect blocks are
	 * usually initially laid out in a position between the data
	 * blocks. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	/*
	 * Skip a block for the first indirect block. Indirect blocks are
	 * usually initially laid out in a good position between the data
	 * blocks, but block reallocation would usually destroy locality by
	 * moving them out of the way to make room for data blocks if we
	 * didn't compensate here.
	 */
	if (start_lbn == NDADDR)
		pref += fs->fs_frag;
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	ump = pip->i_ump;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of dirs created one after another
	 * in the same cg without intervening creation of files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
	    (allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random() / 2 + 1;
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ufs_prepare_reclaim(*vpp);
	ip->i_flag = 0;
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		(*vpp)->v_op = &ffs_vnodeops2;
	else
		(*vpp)->v_op = &ffs_vnodeops1;
	return (0);
noinodes:
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	u_int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Count various limits used for the
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
		    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}
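
/*
 * Worked example (editor's sketch, hypothetical tuning values): with
 * fs_bsize = 16384, fs_avgfilesize = 16384 and fs_avgfpdir = 64, the
 * expected directory footprint is dirsize = 16384 * 64 = 1 MB.  If a
 * cylinder group averages avgbfree = 1000 free blocks, then
 * maxcontigdirs = (1000 * 16384) / 1048576 = 15, so at most 15
 * directories are placed back-to-back in one cylinder group before
 * ffs_dirpref() moves on, leaving room for the files they will hold.
 */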

/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is laid out
 * contiguously if possible.
 */
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * If we are allocating the first indirect block, try to place it
	 * immediately following the last direct block.
	 *
	 * If we are allocating the first data block in the first indirect
	 * block, try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din1->di_db[NDADDR - 1];
		if (bap == NULL && pref != 0)
			return (pref + fs->fs_frag);
		pref = ip->i_din1->di_ib[0];
		if (pref != 0)
			return (pref + fs->fs_frag);
	}
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (cgbase(fs, cg) + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}
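
/*
 * Illustrative note (editor's sketch, hypothetical values): with
 * fs_maxbpg = 2048, a request at indx = 2048 starts a new section.
 * If the previous section's last block (bap[indx - 1]) is allocated,
 * the sweep starts in the group after it, dtog(fs, bap[indx - 1]) + 1;
 * otherwise the guess is the inode's group plus lbn / fs_maxbpg.
 * Either way the rotor settles on the first group whose free-block
 * count is at least the filesystem-wide average.  Within a section
 * the answer is simply the fragment right after the previous block,
 * bap[indx - 1] + fs_frag.
 */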

/*
 * Same as above, but for UFS2
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * If we are allocating the first indirect block, try to place it
	 * immediately following the last direct block.
	 *
	 * If we are allocating the first data block in the first indirect
	 * block, try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din2->di_db[NDADDR - 1];
		if (bap == NULL && pref != 0)
			return (pref + fs->fs_frag);
		pref = ip->i_din2->di_ib[0];
		if (pref != 0)
			return (pref + fs->fs_frag);
	}
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (cgbase(fs, cg) + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgbase(fs, cg) + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held. Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
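
/*
 * Worked example (editor's sketch): with fs_ncg = 16 and a preferred
 * group icg = 5, the quadratic rehash adds i = 1, 2, 4, 8 to the
 * running group number, probing groups 6, 8, 12 and 4 (20 mod 16).
 * If all of those fail, the brute-force pass walks linearly from
 * (5 + 2) mod 16 = 7 through 7, 8, ..., 15, 0, ..., 4, revisiting a
 * few groups the rehash already tried but guaranteeing every group
 * is examined before giving up.
 */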

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs2_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bprev;
	int osize, nsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int nffree;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i))
			goto fail;
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		nffree++;
	}
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= nffree;
	fs->fs_cs(fs, cg).cs_nffree -= nffree;
	fs->fs_fmod = 1;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
		    frags, numfrags(fs, osize));
	bdwrite(bp);
	return (bprev);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);

}
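
/*
 * Worked example (editor's sketch, hypothetical sizes): growing a
 * 2-fragment piece (osize) to 5 fragments (nsize) in an 8-fragment
 * block succeeds only if fragments bno + 2 through bno + 4 are still
 * free and the extension stays within the block.  The free run that
 * used to start at the old tail is accounted for by decrementing
 * cg_frsum[] for its old length and incrementing it for whatever
 * shorter run remains past the new tail.
 */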

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs2_daddr_t
ffs_alloccg(ip, cg, bpref, size, rsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int size;
	int rsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	int i, allocsiz, error, frags;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0)
			goto fail;
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	KASSERT(size == rsize,
	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0)
		goto fail;
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cg) + bno;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
	bdwrite(bp);
	return (blkno);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);
}
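
/*
 * Worked example (editor's sketch, hypothetical counts): a request
 * for 3 fragments when cg_frsum[3] == 0 and cg_frsum[4] == 0 but
 * cg_frsum[5] > 0 settles on allocsiz = 5: ffs_mapsearch() locates a
 * 5-fragment run, the first 3 fragments are taken, cg_frsum[5] is
 * decremented, and the leftover 2-fragment tail is counted in
 * cg_frsum[2].  Only if no run of frags..fs_frag-1 fragments exists
 * is a whole block carved up via ffs_alloccgblk().
 */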

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs2_daddr_t
ffs_alloccgblk(ip, bp, bpref, size)
	struct inode *ip;
	struct buf *bp;
	ufs2_daddr_t bpref;
	int size;
{
	struct fs *fs;
	struct cg *cgp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	u_int8_t *blksfree;
	int i;

	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available block in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cgp->cg_cgx) + bno;
	/*
	 * If the caller didn't want the whole block free the frags here.
	 */
	size = numfrags(fs, size);
	if (size != fs->fs_frag) {
		bno = dtogd(fs, blkno);
		for (i = size; i < fs->fs_frag; i++)
			setbit(blksfree, bno + i);
		i = fs->fs_frag - size;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
	}
	/* XXX Fixme. */
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
		    size, 0);
	UFS_LOCK(ump);
	return (blkno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs2_daddr_t
ffs_clusteralloc(ip, cg, bpref, len, unused)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int len;
	int unused;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int i, run, bit, map, got;
	ufs2_daddr_t bno;
	u_char *mapp;
	int32_t *lp;
	u_int8_t *blksfree;

	fs = ip->i_fs;
	ump = ip->i_ump;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	UFS_UNLOCK(ump);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail_lock;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail_lock;
	bp->b_xflags |= BX_BKGRDWRITE;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		UFS_LOCK(ump);
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail_lock;
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	UFS_LOCK(ump);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
			panic("ffs_clusteralloc: lost block");
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	bdwrite(bp);
	return (bno);

fail_lock:
	UFS_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}
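
/*
 * Illustrative note (editor's sketch): the summary check above means
 * a request for len = 4 blocks scans cg_clustersum[4] through
 * cg_clustersum[fs_contigsumsize]; any nonzero entry promises a run
 * at least that long somewhere in the group.  On a miss, the true
 * maximum run length is recorded in fs_maxcluster[cg], so later
 * requests for anything longer are rejected by the cheap in-core test
 * at the top of ffs_clusteralloc() without rereading the cylinder
 * group map.
 */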

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ufs2_daddr_t
ffs_nodealloccg(ip, cg, ipref, mode, unused)
        struct inode *ip;
        u_int cg;
        ufs2_daddr_t ipref;
        int mode;
        int unused;
{
        struct fs *fs;
        struct cg *cgp;
        struct buf *bp, *ibp;
        struct ufsmount *ump;
        u_int8_t *inosused, *loc;
        struct ufs2_dinode *dp2;
        int error, start, len, i;

        fs = ip->i_fs;
        ump = ip->i_ump;
        if (fs->fs_cs(fs, cg).cs_nifree == 0)
                return (0);
        UFS_UNLOCK(ump);
        error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, NOCRED, &bp);
        if (error) {
                brelse(bp);
                UFS_LOCK(ump);
                return (0);
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
                brelse(bp);
                UFS_LOCK(ump);
                return (0);
        }
        bp->b_xflags |= BX_BKGRDWRITE;
        cgp->cg_old_time = cgp->cg_time = time_second;
        inosused = cg_inosused(cgp);
        if (ipref) {
                ipref %= fs->fs_ipg;
                if (isclr(inosused, ipref))
                        goto gotit;
        }
        start = cgp->cg_irotor / NBBY;
        len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
        loc = memcchr(&inosused[start], 0xff, len);
        if (loc == NULL) {
                len = start + 1;
                start = 0;
                loc = memcchr(&inosused[start], 0xff, len);
                if (loc == NULL) {
                        printf("cg = %d, irotor = %ld, fs = %s\n",
                            cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
                        panic("ffs_nodealloccg: map corrupted");
                        /* NOTREACHED */
                }
        }
        ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
        cgp->cg_irotor = ipref;
gotit:
        /*
         * Check to see if we need to initialize more inodes.
         */
        ibp = NULL;
        if (fs->fs_magic == FS_UFS2_MAGIC &&
            ipref + INOPB(fs) > cgp->cg_initediblk &&
            cgp->cg_initediblk < cgp->cg_niblk) {
                ibp = getblk(ip->i_devvp, fsbtodb(fs,
                    ino_to_fsba(fs, cg * fs->fs_ipg + cgp->cg_initediblk)),
                    (int)fs->fs_bsize, 0, 0, 0);
                bzero(ibp->b_data, (int)fs->fs_bsize);
                dp2 = (struct ufs2_dinode *)(ibp->b_data);
                for (i = 0; i < INOPB(fs); i++) {
                        dp2->di_gen = arc4random() / 2 + 1;
                        dp2++;
                }
                cgp->cg_initediblk += INOPB(fs);
        }
        UFS_LOCK(ump);
        ACTIVECLEAR(fs, cg);
        setbit(inosused, ipref);
        cgp->cg_cs.cs_nifree--;
        fs->fs_cstotal.cs_nifree--;
        fs->fs_cs(fs, cg).cs_nifree--;
        fs->fs_fmod = 1;
        if ((mode & IFMT) == IFDIR) {
                cgp->cg_cs.cs_ndir++;
                fs->fs_cstotal.cs_ndir++;
                fs->fs_cs(fs, cg).cs_ndir++;
        }
        UFS_UNLOCK(ump);
        if (DOINGSOFTDEP(ITOV(ip)))
                softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
        bdwrite(bp);
        if (ibp != NULL)
                bawrite(ibp);
        return ((ino_t)(cg * fs->fs_ipg + ipref));
}
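
/*
 * Illustrative note (not part of the original code): in
 * ffs_nodealloccg(), memcchr() returns the first byte of the inosused
 * map that is not 0xff, i.e. the first byte with at least one clear
 * (free) bit, and ffs(~*loc) - 1 selects the lowest clear bit within
 * that byte. The search runs from cg_irotor to the end of the map and,
 * if that fails, wraps around to retry from the beginning.
 */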

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
static void
ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
        struct ufsmount *ump;
        struct fs *fs;
        struct vnode *devvp;
        ufs2_daddr_t bno;
        long size;
        ino_t inum;
        struct workhead *dephd;
{
        struct mount *mp;
        struct cg *cgp;
        struct buf *bp;
        ufs1_daddr_t fragno, cgbno;
        ufs2_daddr_t cgblkno;
        int i, blk, frags, bbase;
        u_int cg;
        u_int8_t *blksfree;
        struct cdev *dev;

        cg = dtog(fs, bno);
        if (devvp->v_type == VREG) {
                /* devvp is a snapshot */
                dev = VTOI(devvp)->i_devvp->v_rdev;
                cgblkno = fragstoblks(fs, cgtod(fs, cg));
        } else {
                /* devvp is a normal disk device */
                dev = devvp->v_rdev;
                cgblkno = fsbtodb(fs, cgtod(fs, cg));
                ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
        }
#ifdef INVARIANTS
        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
            fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
                printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
                    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
                    size, fs->fs_fsmnt);
                panic("ffs_blkfree_cg: bad size");
        }
#endif
        if ((u_int)bno >= fs->fs_size) {
                printf("bad block %jd, ino %lu\n", (intmax_t)bno,
                    (u_long)inum);
                ffs_fserr(fs, inum, "bad block");
                return;
        }
        if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) {
                brelse(bp);
                return;
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp)) {
                brelse(bp);
                return;
        }
        bp->b_xflags |= BX_BKGRDWRITE;
        cgp->cg_old_time = cgp->cg_time = time_second;
        cgbno = dtogd(fs, bno);
        blksfree = cg_blksfree(cgp);
        UFS_LOCK(ump);
        if (size == fs->fs_bsize) {
                fragno = fragstoblks(fs, cgbno);
                if (!ffs_isfreeblock(fs, blksfree, fragno)) {
                        if (devvp->v_type == VREG) {
                                UFS_UNLOCK(ump);
                                /* devvp is a snapshot */
                                brelse(bp);
                                return;
                        }
                        printf("dev = %s, block = %jd, fs = %s\n",
                            devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
                        panic("ffs_blkfree_cg: freeing free block");
                }
                ffs_setblock(fs, blksfree, fragno);
                ffs_clusteracct(fs, cgp, fragno, 1);
                cgp->cg_cs.cs_nbfree++;
                fs->fs_cstotal.cs_nbfree++;
                fs->fs_cs(fs, cg).cs_nbfree++;
        } else {
                bbase = cgbno - fragnum(fs, cgbno);
                /*
                 * decrement the counts associated with the old frags
                 */
                blk = blkmap(fs, blksfree, bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
                /*
                 * deallocate the fragment
                 */
                frags = numfrags(fs, size);
                for (i = 0; i < frags; i++) {
                        if (isset(blksfree, cgbno + i)) {
                                printf("dev = %s, block = %jd, fs = %s\n",
                                    devtoname(dev), (intmax_t)(bno + i),
                                    fs->fs_fsmnt);
                                panic("ffs_blkfree_cg: freeing free frag");
                        }
                        setbit(blksfree, cgbno + i);
                }
                cgp->cg_cs.cs_nffree += i;
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                /*
                 * add back in counts associated with the new frags
                 */
                blk = blkmap(fs, blksfree, bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
                /*
                 * if a complete block has been reassembled, account for it
                 */
                fragno = fragstoblks(fs, bbase);
                if (ffs_isblock(fs, blksfree, fragno)) {
                        cgp->cg_cs.cs_nffree -= fs->fs_frag;
                        fs->fs_cstotal.cs_nffree -= fs->fs_frag;
                        fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
                        ffs_clusteracct(fs, cgp, fragno, 1);
                        cgp->cg_cs.cs_nbfree++;
                        fs->fs_cstotal.cs_nbfree++;
                        fs->fs_cs(fs, cg).cs_nbfree++;
                }
        }
        fs->fs_fmod = 1;
        ACTIVECLEAR(fs, cg);
        UFS_UNLOCK(ump);
        mp = UFSTOVFS(ump);
        if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG)
                softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
                    numfrags(fs, size), dephd);
        bdwrite(bp);
}
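
/*
 * Illustrative example (not part of the original code): with fs_frag = 8,
 * freeing the last two still-allocated fragments of a block in
 * ffs_blkfree_cg() first adds 2 to the nffree counts; the reassembly
 * check then sees all 8 fragments free, subtracts 8 from nffree, and
 * counts the whole block once in nbfree instead.
 */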

TASKQUEUE_DEFINE_THREAD(ffs_trim);

struct ffs_blkfree_trim_params {
        struct task task;
        struct ufsmount *ump;
        struct vnode *devvp;
        ufs2_daddr_t bno;
        long size;
        ino_t inum;
        struct workhead *pdephd;
        struct workhead dephd;
};

static void
ffs_blkfree_trim_task(ctx, pending)
        void *ctx;
        int pending;
{
        struct ffs_blkfree_trim_params *tp;

        tp = ctx;
        ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
            tp->inum, tp->pdephd);
        vn_finished_secondary_write(UFSTOVFS(tp->ump));
        free(tp, M_TEMP);
}

static void
ffs_blkfree_trim_completed(bip)
        struct bio *bip;
{
        struct ffs_blkfree_trim_params *tp;

        tp = bip->bio_caller2;
        g_destroy_bio(bip);
        TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
        taskqueue_enqueue(taskqueue_ffs_trim, &tp->task);
}

void
ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
        struct ufsmount *ump;
        struct fs *fs;
        struct vnode *devvp;
        ufs2_daddr_t bno;
        long size;
        ino_t inum;
        enum vtype vtype;
        struct workhead *dephd;
{
        struct mount *mp;
        struct bio *bip;
        struct ffs_blkfree_trim_params *tp;

        /*
         * Check to see if a snapshot wants to claim the block.
         * The claim succeeds only if devvp is a normal disk device,
         * not a snapshot, if it has one or more snapshots associated
         * with it, and if one of those snapshots claims the block.
         */
        if (devvp->v_type != VREG &&
            (devvp->v_vflag & VV_COPYONWRITE) &&
            ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
                return;
        }
        /*
         * Nothing to delay if TRIM is disabled, or the operation is
         * performed on the snapshot.
         */
        if (!ump->um_candelete || devvp->v_type == VREG) {
                ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
                return;
        }

        /*
         * Postpone the setting of the free bit in the cg bitmap until the
         * BIO_DELETE is completed. Otherwise, due to disk queue
         * reordering, TRIM might be issued after we reuse the block
         * and write some new data into it.
         */
        tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
        tp->ump = ump;
        tp->devvp = devvp;
        tp->bno = bno;
        tp->size = size;
        tp->inum = inum;
        if (dephd != NULL) {
                LIST_INIT(&tp->dephd);
                LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
                tp->pdephd = &tp->dephd;
        } else
                tp->pdephd = NULL;

        bip = g_alloc_bio();
        bip->bio_cmd = BIO_DELETE;
        bip->bio_offset = dbtob(fsbtodb(fs, bno));
        bip->bio_done = ffs_blkfree_trim_completed;
        bip->bio_length = size;
        bip->bio_caller2 = tp;

        mp = UFSTOVFS(ump);
        vn_start_secondary_write(NULL, &mp, 0);
        g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
}
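
/*
 * Illustrative summary (not part of the original code) of the deferred
 * TRIM path above: ffs_blkfree() issues a BIO_DELETE for the freed
 * range, ffs_blkfree_trim_completed() runs from the GEOM completion
 * path and hands the request to the ffs_trim taskqueue, and
 * ffs_blkfree_trim_task() finally marks the blocks free in the cylinder
 * group map. The free bits thus become visible only after the device
 * has acknowledged the TRIM.
 */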

#ifdef INVARIANTS
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(ip, bno, size)
        struct inode *ip;
        ufs2_daddr_t bno;
        long size;
{
        struct fs *fs;
        struct cg *cgp;
        struct buf *bp;
        ufs1_daddr_t cgbno;
        int i, error, frags, free;
        u_int8_t *blksfree;

        fs = ip->i_fs;
        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
                printf("bsize = %ld, size = %ld, fs = %s\n",
                    (long)fs->fs_bsize, size, fs->fs_fsmnt);
                panic("ffs_checkblk: bad size");
        }
        if ((u_int)bno >= fs->fs_size)
                panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
        error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
            (int)fs->fs_cgsize, NOCRED, &bp);
        if (error)
                panic("ffs_checkblk: cg bread failed");
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp))
                panic("ffs_checkblk: cg magic mismatch");
        bp->b_xflags |= BX_BKGRDWRITE;
        blksfree = cg_blksfree(cgp);
        cgbno = dtogd(fs, bno);
        if (size == fs->fs_bsize) {
                free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
        } else {
                frags = numfrags(fs, size);
                for (free = 0, i = 0; i < frags; i++)
                        if (isset(blksfree, cgbno + i))
                                free++;
                if (free != 0 && free != frags)
                        panic("ffs_checkblk: partially free fragment");
        }
        brelse(bp);
        return (!free);
}
#endif /* INVARIANTS */

/*
 * Free an inode.
 */
int
ffs_vfree(pvp, ino, mode)
        struct vnode *pvp;
        ino_t ino;
        int mode;
{
        struct inode *ip;

        if (DOINGSOFTDEP(pvp)) {
                softdep_freefile(pvp, ino, mode);
                return (0);
        }
        ip = VTOI(pvp);
        return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode,
            NULL));
}
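
/*
 * Illustrative note (an assumption based on the call above, not a
 * statement from this file): with soft updates enabled, ffs_vfree()
 * defers the release to softdep_freefile(), which records it as a
 * dependency; the soft updates code is expected to call ffs_freefile()
 * below once the on-disk references to the inode have been cleared.
 */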

/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 */
int
ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
        struct ufsmount *ump;
        struct fs *fs;
        struct vnode *devvp;
        ino_t ino;
        int mode;
        struct workhead *wkhd;
{
        struct cg *cgp;
        struct buf *bp;
        ufs2_daddr_t cgbno;
        int error;
        u_int cg;
        u_int8_t *inosused;
        struct cdev *dev;

        cg = ino_to_cg(fs, ino);
        if (devvp->v_type == VREG) {
                /* devvp is a snapshot */
                dev = VTOI(devvp)->i_devvp->v_rdev;
                cgbno = fragstoblks(fs, cgtod(fs, cg));
        } else {
                /* devvp is a normal disk device */
                dev = devvp->v_rdev;
                cgbno = fsbtodb(fs, cgtod(fs, cg));
        }
        if (ino >= fs->fs_ipg * fs->fs_ncg)
                panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
                    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
        if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
                brelse(bp);
                return (error);
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp)) {
                brelse(bp);
                return (0);
        }
        bp->b_xflags |= BX_BKGRDWRITE;
        cgp->cg_old_time = cgp->cg_time = time_second;
        inosused = cg_inosused(cgp);
        ino %= fs->fs_ipg;
        if (isclr(inosused, ino)) {
                printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
                    (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
                if (fs->fs_ronly == 0)
                        panic("ffs_freefile: freeing free inode");
        }
        clrbit(inosused, ino);
        if (ino < cgp->cg_irotor)
                cgp->cg_irotor = ino;
        cgp->cg_cs.cs_nifree++;
        UFS_LOCK(ump);
        fs->fs_cstotal.cs_nifree++;
        fs->fs_cs(fs, cg).cs_nifree++;
        if ((mode & IFMT) == IFDIR) {
                cgp->cg_cs.cs_ndir--;
                fs->fs_cstotal.cs_ndir--;
                fs->fs_cs(fs, cg).cs_ndir--;
        }
        fs->fs_fmod = 1;
        ACTIVECLEAR(fs, cg);
        UFS_UNLOCK(ump);
        if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type != VREG)
                softdep_setup_inofree(UFSTOVFS(ump), bp,
                    ino + cg * fs->fs_ipg, wkhd);
        bdwrite(bp);
        return (0);
}

/*
 * Check to see if a file is free.
 */
int
ffs_checkfreefile(fs, devvp, ino)
        struct fs *fs;
        struct vnode *devvp;
        ino_t ino;
{
        struct cg *cgp;
        struct buf *bp;
        ufs2_daddr_t cgbno;
        int ret;
        u_int cg;
        u_int8_t *inosused;

        cg = ino_to_cg(fs, ino);
        if (devvp->v_type == VREG) {
                /* devvp is a snapshot */
                cgbno = fragstoblks(fs, cgtod(fs, cg));
        } else {
                /* devvp is a normal disk device */
                cgbno = fsbtodb(fs, cgtod(fs, cg));
        }
        if (ino >= fs->fs_ipg * fs->fs_ncg)
                return (1);
        if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) {
                brelse(bp);
                return (1);
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp)) {
                brelse(bp);
                return (1);
        }
        inosused = cg_inosused(cgp);
        ino %= fs->fs_ipg;
        ret = isclr(inosused, ino);
        brelse(bp);
        return (ret);
}
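
/*
 * Illustrative note (not part of the original code): in ffs_freefile()
 * and ffs_checkfreefile() above, as in ffs_blkfree_cg(), a VREG devvp
 * means the cylinder group is read through a snapshot vnode, so the cg
 * block is addressed by logical filesystem block (fragstoblks); a
 * device vnode is addressed by physical disk block (fsbtodb).
 */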

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block and none are
 * available.
 */
static ufs1_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
        struct fs *fs;
        struct cg *cgp;
        ufs2_daddr_t bpref;
        int allocsiz;
{
        ufs1_daddr_t bno;
        int start, len, loc, i;
        int blk, field, subfield, pos;
        u_int8_t *blksfree;

        /*
         * find the fragment by searching through the free block
         * map for an appropriate bit pattern
         */
        if (bpref)
                start = dtogd(fs, bpref) / NBBY;
        else
                start = cgp->cg_frotor / NBBY;
        blksfree = cg_blksfree(cgp);
        len = howmany(fs->fs_fpg, NBBY) - start;
        loc = scanc((u_int)len, (u_char *)&blksfree[start],
            fragtbl[fs->fs_frag],
            (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
        if (loc == 0) {
                len = start + 1;
                start = 0;
                loc = scanc((u_int)len, (u_char *)&blksfree[0],
                    fragtbl[fs->fs_frag],
                    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
                if (loc == 0) {
                        printf("start = %d, len = %d, fs = %s\n",
                            start, len, fs->fs_fsmnt);
                        panic("ffs_alloccg: map corrupted");
                        /* NOTREACHED */
                }
        }
        bno = (start + len - loc) * NBBY;
        cgp->cg_frotor = bno;
        /*
         * found the byte in the map
         * sift through the bits to find the selected frag
         */
        for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
                blk = blkmap(fs, blksfree, bno);
                blk <<= 1;
                field = around[allocsiz];
                subfield = inside[allocsiz];
                for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
                        if ((blk & field) == subfield)
                                return (bno + pos);
                        field <<= 1;
                        subfield <<= 1;
                }
        }
        printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
        panic("ffs_alloccg: block not in map");
        return (-1);
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
void
ffs_fserr(fs, inum, cp)
        struct fs *fs;
        ino_t inum;
        char *cp;
{
        struct thread *td = curthread;	/* XXX */
        struct proc *p = td->td_proc;

        log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
            p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
            fs->fs_fsmnt, cp);
}

/*
 * This function provides the capability for the fsck program to
 * update an active filesystem. Fourteen operations are provided:
 *
 * adjrefcnt(inode, amt) - adjusts the reference count on the
 *	specified inode by the specified amount. Under normal
 *	operation the count should always go down. Decrementing
 *	the count to zero will cause the inode to be freed.
 * adjblkcnt(inode, amt) - adjust the number of blocks used by the
 *	inode by the specified amount.
 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
 *	adjust the superblock summary.
 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
 *	are marked as free. Inodes should never have to be marked
 *	as in use.
 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
 *	are marked as free. Inodes should never have to be marked
 *	as in use.
 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
 *	are marked as free. Blocks should never have to be marked
 *	as in use.
 * setflags(flags, set/clear) - the fs_flags field has the specified
 *	flags set (second parameter +1) or cleared (second parameter -1).
 * setcwd(dirinode) - set the current directory to dirinode in the
 *	filesystem associated with the snapshot.
 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
 *	in the current directory is oldvalue, then change it to newvalue.
 * unlink(nameptr, oldvalue) - Verify that the inode number associated
 *	with nameptr in the current directory is oldvalue, then unlink it.
 *
 * The following functions may only be used on a quiescent filesystem
 * by the soft updates journal. They are not safe to be run on an active
 * filesystem.
 *
 * setinode(inode, dip) - the specified disk inode is replaced with the
 *	contents pointed to by dip.
 * setbufoutput(fd, flags) - output associated with the specified file
 *	descriptor (which must reference the character device supporting
 *	the filesystem) switches from using physio to running through the
 *	buffer cache when flags is set to 1. The descriptor reverts to
 *	physio for output when flags is set to zero.
 */
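
/*
 * Illustrative userland usage (a sketch, not code from this file):
 * fsck(8) fills in a struct fsck_cmd and writes it to one of the
 * vfs.ffs nodes declared below, e.g. to add one link to inode 37,
 * assuming fd is an open descriptor within the target filesystem:
 *
 *	struct fsck_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.version = FFS_CMD_VERSION;
 *	cmd.handle = fd;
 *	cmd.value = 37;		(inode number)
 *	cmd.size = 1;		(adjustment amount)
 *	if (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL, &cmd,
 *	    sizeof(cmd)) == -1)
 *		err(1, "adjrefcnt");
 */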

static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
    0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
    sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
    sysctl_ffs_fsck, "Adjust number of directories");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
    sysctl_ffs_fsck, "Adjust number of free blocks");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
    sysctl_ffs_fsck, "Adjust number of free inodes");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
    sysctl_ffs_fsck, "Adjust number of free frags");

static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
    sysctl_ffs_fsck, "Adjust number of free clusters");

static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
    sysctl_ffs_fsck, "Free Range of Directory Inodes");

static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
    sysctl_ffs_fsck, "Free Range of File Inodes");

static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
    sysctl_ffs_fsck, "Free Range of Blocks");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
    sysctl_ffs_fsck, "Change Filesystem Flags");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
    sysctl_ffs_fsck, "Set Current Working Directory");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
    sysctl_ffs_fsck, "Change Value of .. Entry");

static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
    sysctl_ffs_fsck, "Unlink a Duplicate Name");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
    sysctl_ffs_fsck, "Update an On-Disk Inode");

static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
    sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
Entry"); 2500 2501 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR, 2502 sysctl_ffs_fsck, "Unlink a Duplicate Name"); 2503 2504 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR, 2505 sysctl_ffs_fsck, "Update an On-Disk Inode"); 2506 2507 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR, 2508 sysctl_ffs_fsck, "Set Buffered Writing for Descriptor"); 2509 2510 #define DEBUG 1 2511 #ifdef DEBUG 2512 static int fsckcmds = 0; 2513 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, ""); 2514 #endif /* DEBUG */ 2515 2516 static int buffered_write(struct file *, struct uio *, struct ucred *, 2517 int, struct thread *); 2518 2519 static int 2520 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS) 2521 { 2522 struct thread *td = curthread; 2523 struct fsck_cmd cmd; 2524 struct ufsmount *ump; 2525 struct vnode *vp, *vpold, *dvp, *fdvp; 2526 struct inode *ip, *dp; 2527 struct mount *mp; 2528 struct fs *fs; 2529 ufs2_daddr_t blkno; 2530 long blkcnt, blksize; 2531 struct filedesc *fdp; 2532 struct file *fp, *vfp; 2533 int filetype, error; 2534 static struct fileops *origops, bufferedops; 2535 2536 if (req->newlen > sizeof cmd) 2537 return (EBADRPC); 2538 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0) 2539 return (error); 2540 if (cmd.version != FFS_CMD_VERSION) 2541 return (ERPCMISMATCH); 2542 if ((error = getvnode(td->td_proc->p_fd, cmd.handle, CAP_FSCK, 2543 &fp)) != 0) 2544 return (error); 2545 vp = fp->f_data; 2546 if (vp->v_type != VREG && vp->v_type != VDIR) { 2547 fdrop(fp, td); 2548 return (EINVAL); 2549 } 2550 vn_start_write(vp, &mp, V_WAIT); 2551 if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) { 2552 vn_finished_write(mp); 2553 fdrop(fp, td); 2554 return (EINVAL); 2555 } 2556 ump = VFSTOUFS(mp); 2557 if ((mp->mnt_flag & MNT_RDONLY) && 2558 ump->um_fsckpid != td->td_proc->p_pid) { 2559 vn_finished_write(mp); 2560 fdrop(fp, td); 2561 return (EROFS); 2562 } 2563 fs = ump->um_fs; 2564 filetype = IFREG; 2565 2566 switch (oidp->oid_number) { 2567 2568 case FFS_SET_FLAGS: 2569 #ifdef DEBUG 2570 if (fsckcmds) 2571 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname, 2572 cmd.size > 0 ? 
"set" : "clear"); 2573 #endif /* DEBUG */ 2574 if (cmd.size > 0) 2575 fs->fs_flags |= (long)cmd.value; 2576 else 2577 fs->fs_flags &= ~(long)cmd.value; 2578 break; 2579 2580 case FFS_ADJ_REFCNT: 2581 #ifdef DEBUG 2582 if (fsckcmds) { 2583 printf("%s: adjust inode %jd link count by %jd\n", 2584 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2585 (intmax_t)cmd.size); 2586 } 2587 #endif /* DEBUG */ 2588 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2589 break; 2590 ip = VTOI(vp); 2591 ip->i_nlink += cmd.size; 2592 DIP_SET(ip, i_nlink, ip->i_nlink); 2593 ip->i_effnlink += cmd.size; 2594 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2595 error = ffs_update(vp, 1); 2596 if (DOINGSOFTDEP(vp)) 2597 softdep_change_linkcnt(ip); 2598 vput(vp); 2599 break; 2600 2601 case FFS_ADJ_BLKCNT: 2602 #ifdef DEBUG 2603 if (fsckcmds) { 2604 printf("%s: adjust inode %jd block count by %jd\n", 2605 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2606 (intmax_t)cmd.size); 2607 } 2608 #endif /* DEBUG */ 2609 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2610 break; 2611 ip = VTOI(vp); 2612 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size); 2613 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2614 error = ffs_update(vp, 1); 2615 vput(vp); 2616 break; 2617 2618 case FFS_DIR_FREE: 2619 filetype = IFDIR; 2620 /* fall through */ 2621 2622 case FFS_FILE_FREE: 2623 #ifdef DEBUG 2624 if (fsckcmds) { 2625 if (cmd.size == 1) 2626 printf("%s: free %s inode %ju\n", 2627 mp->mnt_stat.f_mntonname, 2628 filetype == IFDIR ? "directory" : "file", 2629 (uintmax_t)cmd.value); 2630 else 2631 printf("%s: free %s inodes %ju-%ju\n", 2632 mp->mnt_stat.f_mntonname, 2633 filetype == IFDIR ? "directory" : "file", 2634 (uintmax_t)cmd.value, 2635 (uintmax_t)(cmd.value + cmd.size - 1)); 2636 } 2637 #endif /* DEBUG */ 2638 while (cmd.size > 0) { 2639 if ((error = ffs_freefile(ump, fs, ump->um_devvp, 2640 cmd.value, filetype, NULL))) 2641 break; 2642 cmd.size -= 1; 2643 cmd.value += 1; 2644 } 2645 break; 2646 2647 case FFS_BLK_FREE: 2648 #ifdef DEBUG 2649 if (fsckcmds) { 2650 if (cmd.size == 1) 2651 printf("%s: free block %jd\n", 2652 mp->mnt_stat.f_mntonname, 2653 (intmax_t)cmd.value); 2654 else 2655 printf("%s: free blocks %jd-%jd\n", 2656 mp->mnt_stat.f_mntonname, 2657 (intmax_t)cmd.value, 2658 (intmax_t)cmd.value + cmd.size - 1); 2659 } 2660 #endif /* DEBUG */ 2661 blkno = cmd.value; 2662 blkcnt = cmd.size; 2663 blksize = fs->fs_frag - (blkno % fs->fs_frag); 2664 while (blkcnt > 0) { 2665 if (blksize > blkcnt) 2666 blksize = blkcnt; 2667 ffs_blkfree(ump, fs, ump->um_devvp, blkno, 2668 blksize * fs->fs_fsize, ROOTINO, VDIR, NULL); 2669 blkno += blksize; 2670 blkcnt -= blksize; 2671 blksize = fs->fs_frag; 2672 } 2673 break; 2674 2675 /* 2676 * Adjust superblock summaries. fsck(8) is expected to 2677 * submit deltas when necessary. 

        /*
         * Adjust superblock summaries. fsck(8) is expected to
         * submit deltas when necessary.
         */
        case FFS_ADJ_NDIR:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: adjust number of directories by %+jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                fs->fs_cstotal.cs_ndir += cmd.value;
                break;

        case FFS_ADJ_NBFREE:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: adjust number of free blocks by %+jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                fs->fs_cstotal.cs_nbfree += cmd.value;
                break;

        case FFS_ADJ_NIFREE:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: adjust number of free inodes by %+jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                fs->fs_cstotal.cs_nifree += cmd.value;
                break;

        case FFS_ADJ_NFFREE:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: adjust number of free frags by %+jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                fs->fs_cstotal.cs_nffree += cmd.value;
                break;

        case FFS_ADJ_NUMCLUSTERS:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: adjust number of free clusters by %+jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                fs->fs_cstotal.cs_numclusters += cmd.value;
                break;

        case FFS_SET_CWD:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: set current directory to inode %jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
                        break;
                AUDIT_ARG_VNODE1(vp);
                if ((error = change_dir(vp, td)) != 0) {
                        vput(vp);
                        break;
                }
                VOP_UNLOCK(vp, 0);
                fdp = td->td_proc->p_fd;
                FILEDESC_XLOCK(fdp);
                vpold = fdp->fd_cdir;
                fdp->fd_cdir = vp;
                FILEDESC_XUNLOCK(fdp);
                vrele(vpold);
                break;

        case FFS_SET_DOTDOT:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: change .. in cwd from %jd to %jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
                            (intmax_t)cmd.size);
                }
#endif /* DEBUG */
                /*
                 * First we have to get and lock the parent directory
                 * to which ".." points.
                 */
                error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
                if (error)
                        break;
                /*
                 * Now we get and lock the child directory containing "..".
                 */
                FILEDESC_SLOCK(td->td_proc->p_fd);
                dvp = td->td_proc->p_fd->fd_cdir;
                FILEDESC_SUNLOCK(td->td_proc->p_fd);
                if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
                        vput(fdvp);
                        break;
                }
                dp = VTOI(dvp);
                dp->i_offset = 12;	/* XXX mastertemplate.dot_reclen */
                error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
                    DT_DIR, 0);
                cache_purge(fdvp);
                cache_purge(dvp);
                vput(dvp);
                vput(fdvp);
                break;
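
        /*
         * Illustrative note (not part of the original code): the
         * i_offset = 12 set above is the byte offset of the ".." entry
         * in the directory block; the "." entry that precedes it
         * occupies exactly 12 bytes, the d_reclen of the master
         * template dot entry referenced in the XXX comment.
         */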

        case FFS_UNLINK:
#ifdef DEBUG
                if (fsckcmds) {
                        char buf[32];

                        if (copyinstr((char *)(intptr_t)cmd.value, buf, 32,
                            NULL))
                                strncpy(buf, "Name_too_long", 32);
                        printf("%s: unlink %s (inode %jd)\n",
                            mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
                }
#endif /* DEBUG */
                /*
                 * kern_unlinkat will do its own start/finish writes and
                 * they do not nest, so drop ours here. Setting mp == NULL
                 * indicates that vn_finished_write is not needed down below.
                 */
                vn_finished_write(mp);
                mp = NULL;
                error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
                    UIO_USERSPACE, (ino_t)cmd.size);
                break;

        case FFS_SET_INODE:
                if (ump->um_fsckpid != td->td_proc->p_pid) {
                        error = EPERM;
                        break;
                }
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: update inode %jd\n",
                            mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
                        break;
                AUDIT_ARG_VNODE1(vp);
                ip = VTOI(vp);
                if (ip->i_ump->um_fstype == UFS1)
                        error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
                            sizeof(struct ufs1_dinode));
                else
                        error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
                            sizeof(struct ufs2_dinode));
                if (error) {
                        vput(vp);
                        break;
                }
                ip->i_flag |= IN_CHANGE | IN_MODIFIED;
                error = ffs_update(vp, 1);
                vput(vp);
                break;

        case FFS_SET_BUFOUTPUT:
                if (ump->um_fsckpid != td->td_proc->p_pid) {
                        error = EPERM;
                        break;
                }
                if (VTOI(vp)->i_ump != ump) {
                        error = EINVAL;
                        break;
                }
#ifdef DEBUG
                if (fsckcmds) {
                        printf("%s: %s buffered output for descriptor %jd\n",
                            mp->mnt_stat.f_mntonname,
                            cmd.size == 1 ? "enable" : "disable",
                            (intmax_t)cmd.value);
                }
#endif /* DEBUG */
                if ((error = getvnode(td->td_proc->p_fd, cmd.value,
                    CAP_FSCK, &vfp)) != 0)
                        break;
                if (vfp->f_vnode->v_type != VCHR) {
                        fdrop(vfp, td);
                        error = EINVAL;
                        break;
                }
                if (origops == NULL) {
                        origops = vfp->f_ops;
                        bcopy((void *)origops, (void *)&bufferedops,
                            sizeof(bufferedops));
                        bufferedops.fo_write = buffered_write;
                }
                if (cmd.size == 1)
                        atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
                            (uintptr_t)&bufferedops);
                else
                        atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
                            (uintptr_t)origops);
                fdrop(vfp, td);
                break;

        default:
#ifdef DEBUG
                if (fsckcmds) {
                        printf("Invalid request %d from fsck\n",
                            oidp->oid_number);
                }
#endif /* DEBUG */
                error = EINVAL;
                break;

        }
        fdrop(fp, td);
        vn_finished_write(mp);
        return (error);
}
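
/*
 * Illustrative note (not part of the original code): FFS_SET_BUFOUTPUT
 * above clones the descriptor's fileops table once into bufferedops,
 * replaces only fo_write with buffered_write() below, and atomically
 * switches f_ops between the original and the buffered table.
 */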

/*
 * Function to switch a descriptor to use the buffer cache to stage
 * its I/O. This is needed so that writes to the filesystem device
 * will give snapshots a chance to copy modified blocks for which they
 * need to retain copies.
 */
static int
buffered_write(fp, uio, active_cred, flags, td)
        struct file *fp;
        struct uio *uio;
        struct ucred *active_cred;
        int flags;
        struct thread *td;
{
        struct vnode *devvp;
        struct inode *ip;
        struct buf *bp;
        struct fs *fs;
        int error;
        daddr_t lbn;

        /*
         * The devvp is associated with the /dev filesystem. To discover
         * the filesystem with which the device is associated, we depend
         * on the application setting the current directory to a location
         * within the filesystem being written. Yes, this is an ugly hack.
         */
        devvp = fp->f_vnode;
        ip = VTOI(td->td_proc->p_fd->fd_cdir);
        if (ip->i_devvp != devvp)
                return (EINVAL);
        fs = ip->i_fs;
        foffset_lock_uio(fp, uio, flags);
        vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
#ifdef DEBUG
        if (fsckcmds) {
                printf("%s: buffered write for block %jd\n",
                    fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
        }
#endif /* DEBUG */
        /*
         * All I/O must be contained within a filesystem block, start on
         * a fragment boundary, and be a multiple of fragments in length.
         */
        if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
            fragoff(fs, uio->uio_offset) != 0 ||
            fragoff(fs, uio->uio_resid) != 0) {
                error = EINVAL;
                goto out;
        }
        lbn = numfrags(fs, uio->uio_offset);
        bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
        bp->b_flags |= B_RELBUF;
        if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
                brelse(bp);
                goto out;
        }
        error = bwrite(bp);
out:
        VOP_UNLOCK(devvp, 0);
        foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
        return (error);
}
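
/*
 * Illustrative example (not part of the original code): with
 * fs_bsize = 32768 and fs_fsize = 4096, buffered_write() accepts an
 * 8192-byte write at offset 40960 (fragment-aligned, a multiple of the
 * fragment size, and contained in one block) and maps it to lbn 10;
 * the same write at offset 28672 is rejected because it would cross
 * the block boundary at 32768.
 */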