1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_quota.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/mount.h> 40 #include <sys/proc.h> 41 #include <sys/bio.h> 42 #include <sys/buf.h> 43 #include <sys/vnode.h> 44 #include <sys/malloc.h> 45 #include <sys/resourcevar.h> 46 #include <sys/vmmeter.h> 47 #include <sys/stat.h> 48 49 #include <vm/vm.h> 50 #include <vm/vm_extern.h> 51 52 #include <ufs/ufs/extattr.h> 53 #include <ufs/ufs/quota.h> 54 #include <ufs/ufs/ufsmount.h> 55 #include <ufs/ufs/inode.h> 56 #include <ufs/ufs/ufs_extern.h> 57 58 #include <ufs/ffs/fs.h> 59 #include <ufs/ffs/ffs_extern.h> 60 61 static int ffs_indirtrunc(struct inode *, ufs2_daddr_t, ufs2_daddr_t, 62 ufs2_daddr_t, int, ufs2_daddr_t *); 63 64 /* 65 * Update the access, modified, and inode change times as specified by the 66 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. Write the inode 67 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by 68 * the timestamp update). The IN_LAZYMOD flag is set to force a write 69 * later if not now. If we write now, then clear both IN_MODIFIED and 70 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is 71 * set, then wait for the write to complete. 72 */ 73 int 74 ffs_update(vp, waitfor) 75 struct vnode *vp; 76 int waitfor; 77 { 78 struct fs *fs; 79 struct buf *bp; 80 struct inode *ip; 81 int error; 82 83 ASSERT_VOP_LOCKED(vp, "ffs_update"); 84 ufs_itimes(vp); 85 ip = VTOI(vp); 86 if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0) 87 return (0); 88 ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED); 89 fs = ip->i_fs; 90 if (fs->fs_ronly) 91 return (0); 92 /* 93 * Ensure that uid and gid are correct. This is a temporary 94 * fix until fsck has been changed to do the update. 
95 */ 96 if (fs->fs_magic == FS_UFS1_MAGIC && /* XXX */ 97 fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */ 98 ip->i_din1->di_ouid = ip->i_uid; /* XXX */ 99 ip->i_din1->di_ogid = ip->i_gid; /* XXX */ 100 } /* XXX */ 101 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 102 (int)fs->fs_bsize, NOCRED, &bp); 103 if (error) { 104 brelse(bp); 105 return (error); 106 } 107 if (DOINGSOFTDEP(vp)) 108 softdep_update_inodeblock(ip, bp, waitfor); 109 else if (ip->i_effnlink != ip->i_nlink) 110 panic("ffs_update: bad link cnt"); 111 if (ip->i_ump->um_fstype == UFS1) 112 *((struct ufs1_dinode *)bp->b_data + 113 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; 114 else 115 *((struct ufs2_dinode *)bp->b_data + 116 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; 117 if (waitfor && !DOINGASYNC(vp)) { 118 return (bwrite(bp)); 119 } else if (vm_page_count_severe() || buf_dirty_count_severe()) { 120 return (bwrite(bp)); 121 } else { 122 if (bp->b_bufsize == fs->fs_bsize) 123 bp->b_flags |= B_CLUSTEROK; 124 bdwrite(bp); 125 return (0); 126 } 127 } 128 129 #define SINGLE 0 /* index of single indirect block */ 130 #define DOUBLE 1 /* index of double indirect block */ 131 #define TRIPLE 2 /* index of triple indirect block */ 132 /* 133 * Truncate the inode ip to at most length size, freeing the 134 * disk blocks. 
 */
int
ffs_truncate(vp, length, flags, cred, td)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	struct inode *ip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs2_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	ufs2_daddr_t count, blocksreleased = 0, datablocks;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	int needextclean, softdepslowdown, extblocks;
	int offset, size, level, nblocks;
	int i, error, allerror;
	off_t osize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;

	ASSERT_VOP_LOCKED(vp, "ffs_truncate");

	if (length < 0)
		return (EINVAL);
	/*
	 * Historically clients did not have to specify which data
	 * they were truncating. So, if not specified, we assume
	 * traditional behavior, e.g., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;
	/*
	 * If we are truncating the extended-attributes, and cannot
	 * do it with soft updates, then do it slowly here. If we are
	 * truncating both the extended attributes and the file contents
	 * (e.g., the file is being unlinked), then pick it off with
	 * soft updates below.
	 */
	needextclean = 0;
	softdepslowdown = DOINGSOFTDEP(vp) && softdep_slowdown(vp);
	extblocks = 0;
	/*
	 * i_blocks counts both normal-data and extended-attribute
	 * blocks; separate out the ext-attr portion (UFS2 only) so
	 * datablocks reflects only the regular file data.
	 */
	datablocks = DIP(ip, i_blocks);
	if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) {
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (DOINGSOFTDEP(vp) && softdepslowdown == 0 && length == 0) {
			if ((flags & IO_NORMAL) == 0) {
				/* Ext data only: let soft updates free it. */
				softdep_setup_freeblocks(ip, length, IO_EXT);
				return (0);
			}
			/* Both ext and normal: handled together below. */
			needextclean = 1;
		} else {
			/*
			 * Slow path: synchronously flush, zero the ext
			 * block pointers, write the inode, then free the
			 * old blocks.  Note the inode is written BEFORE
			 * the blocks are freed so a crash cannot leave
			 * the inode pointing at freed blocks.
			 */
			if (length != 0)
				panic("ffs_truncate: partial trunc of extdata");
			if ((error = ffs_syncvnode(vp, MNT_WAIT)) != 0)
				return (error);
			osize = ip->i_din2->di_extsize;
			ip->i_din2->di_blocks -= extblocks;
#ifdef QUOTA
			(void) chkdq(ip, -extblocks, NOCRED, 0);
#endif
			vinvalbuf(vp, V_ALT, td, 0, 0);
			ip->i_din2->di_extsize = 0;
			for (i = 0; i < NXADDR; i++) {
				oldblks[i] = ip->i_din2->di_extb[i];
				ip->i_din2->di_extb[i] = 0;
			}
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if ((error = ffs_update(vp, 1)))
				return (error);
			for (i = 0; i < NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(ump, fs, ip->i_devvp, oldblks[i],
				    sblksize(fs, osize, i), ip->i_number);
			}
		}
	}
	if ((flags & IO_NORMAL) == 0)
		return (0);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Short symlinks store their target in the inode itself; just
	 * zero the stored string rather than freeing blocks.
	 */
	if (vp->v_type == VLNK &&
	    (ip->i_size < vp->v_mount->mnt_maxsymlinklen ||
	     datablocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(ip), (u_int)ip->i_size);
		ip->i_size = 0;
		DIP_SET(ip, i_size, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			softdep_setup_freeblocks(ip, length, IO_EXT);
		return (ffs_update(vp, 1));
	}
	/* Size unchanged: just update times (and ext cleanup if queued). */
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			softdep_setup_freeblocks(ip, length, IO_EXT);
		return (ffs_update(vp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = getinoquota(ip);
	if (error)
		return (error);
#endif
	if ((ip->i_flags & SF_SNAPSHOT) != 0)
		ffs_snapremove(vp);
	/*
	 * Reset the vnode's sequential-write/cluster bookkeeping;
	 * NOTE(review): presumably stale once the file shrinks —
	 * confirm against the cluster-write code.
	 */
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	if (DOINGSOFTDEP(vp)) {
		if (length > 0 || softdepslowdown) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = ffs_syncvnode(vp, MNT_WAIT)) != 0)
				return (error);
			UFS_LOCK(ump);
			if (ip->i_flag & IN_SPACECOUNTED)
				fs->fs_pendingblocks -= datablocks;
			UFS_UNLOCK(ump);
		} else {
			/*
			 * Truncation to zero under soft updates: hand
			 * the whole free-block job to the soft-updates
			 * code and return without touching the block
			 * pointers ourselves.
			 */
#ifdef QUOTA
			(void) chkdq(ip, -datablocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(ip, length, needextclean ?
			    IO_EXT | IO_NORMAL : IO_NORMAL);
			ASSERT_VOP_LOCKED(vp, "ffs_truncate1");
			vinvalbuf(vp, needextclean ? 0 : V_NORMAL, td, 0, 0);
			vnode_pager_setsize(vp, 0);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(vp, 0));
		}
	}
	osize = ip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(vp, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(vp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error) {
			return (error);
		}
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(vp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = ffs_syncvnode(vp, MNT_WAIT)) != 0)
			return (error);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		size = blksize(fs, ip, lbn);
		if (vp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep. Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks. If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list. lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = DIP(ip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP_SET(ip, i_ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = DIP(ip, i_db[i]);
		if (i > lastblock)
			DIP_SET(ip, i_db[i], 0);
	}
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	/* Synchronous write: the on-disk inode must be safe first. */
	allerror = ffs_update(vp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = DIP(ip, i_db[i]);
		DIP_SET(ip, i_db[i], oldblks[i]);
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = DIP(ip, i_ib[i]);
		DIP_SET(ip, i_ib[i], oldblks[NDADDR + i]);
	}
	ip->i_size = osize;
	DIP_SET(ip, i_size, osize);

	error = vtruncbuf(vp, cred, td, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(ip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(ip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_SET(ip, i_ib[level], 0);
				ffs_blkfree(ump, fs, ip->i_devvp, bn,
				    fs->fs_bsize, ip->i_number);
				blocksreleased += nblocks;
			}
		}
		/* This indirect level is (partially) retained: done. */
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(ip, i_db[i]);
		if (bn == 0)
			continue;
		DIP_SET(ip, i_db[i], 0);
		bsize = blksize(fs, ip, i);
		ffs_blkfree(ump, fs, ip->i_devvp, bn, bsize, ip->i_number);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(ip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(ump, fs, ip->i_devvp, bn,
			    oldspace - newspace, ip->i_number);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	/* The in-core pointers must match what we wrote to disk above. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != DIP(ip, i_ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != DIP(ip, i_db[i]))
			panic("ffs_truncate2");
	VI_LOCK(vp);
	if (length == 0 &&
	    (fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) &&
	    (vp->v_bufobj.bo_dirty.bv_cnt > 0 ||
	     vp->v_bufobj.bo_clean.bv_cnt > 0))
		panic("ffs_truncate3");
	VI_UNLOCK(vp);
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	ip->i_size = length;
	DIP_SET(ip, i_size, length);
	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - blocksreleased);

	if (DIP(ip, i_blocks) < 0)			/* sanity */
		DIP_SET(ip, i_blocks, 0);
	ip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(ip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	ufs2_daddr_t lbn, lastbn;
	ufs2_daddr_t dbn;
	int level;
	ufs2_daddr_t *countp;
{
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	struct vnode *vp;
	caddr_t copy = NULL;
	int i, nblocks, error = 0, allerror = 0;
	ufs2_daddr_t nb, nlbn, last;
	ufs2_daddr_t blkcount, factor, blocksreleased = 0;
	ufs1_daddr_t *bap1 = NULL;
	ufs2_daddr_t *bap2 = NULL;
	/* Pick the 32-bit (UFS1) or 64-bit (UFS2) pointer array. */
#define	BAP(ip, i) (((ip)->i_ump->um_fstype == UFS1) ? bap1[i] : bap2[i])

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* Not cached: issue the read ourselves with b_blkno set. */
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	if (ip->i_ump->um_fstype == UFS1)
		bap1 = (ufs1_daddr_t *)bp->b_data;
	else
		bap2 = (ufs2_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		/*
		 * Partial truncation: snapshot the pointer array, zero
		 * the entries being discarded, and write the indirect
		 * block back BEFORE freeing the blocks it referenced.
		 * The blocks are then freed from the saved copy.
		 */
		MALLOC(copy, caddr_t, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bp->b_data, copy, (u_int)fs->fs_bsize);
		for (i = last + 1; i < NINDIR(fs); i++)
			if (ip->i_ump->um_fstype == UFS1)
				bap1[i] = 0;
			else
				bap2[i] = 0;
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		if (ip->i_ump->um_fstype == UFS1)
			bap1 = (ufs1_daddr_t *)copy;
		else
			bap2 = (ufs2_daddr_t *)copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = BAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			/* lastbn == -1: free everything below this entry. */
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs2_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip->i_ump, fs, ip->i_devvp, nb, fs->fs_bsize,
		    ip->i_number);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 * (The loop above exits with i == last.)
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = BAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		/* bp was already written back above; just drop the copy. */
		FREE(copy, M_TEMP);
	} else {
		/* Whole block freed: its cached contents are now stale. */
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}