/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc(struct inode *, ufs2_daddr_t, ufs2_daddr_t,
	    ufs2_daddr_t, int, ufs2_daddr_t *);

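/*
 * Illustrative sketch of the usual calling convention for ffs_update(),
 * drawn from the callers in this file (e.g. ffs_truncate() below):
 *
 *	ip->i_flag |= IN_CHANGE | IN_UPDATE;	(mark the in-core inode dirty)
 *	error = ffs_update(vp, 1);		(waitfor != 0: wait for the write)
 *
 * The flag semantics themselves are described in the comment that follows.
 */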
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

#ifdef DEBUG_VFS_LOCKS
	if ((vp->v_iflag & VI_XLOCK) == 0)
		ASSERT_VOP_LOCKED(vp, "ffs_update");
#endif
	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly)
		return (0);
	/*
	 * Ensure that uid and gid are correct.  This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC &&		/* XXX */
	    fs->fs_old_inodefmt < FS_44INODEFMT) {	/* XXX */
		ip->i_din1->di_ouid = ip->i_uid;	/* XXX */
		ip->i_din1->di_ogid = ip->i_gid;	/* XXX */
	}						/* XXX */
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	if (ip->i_ump->um_fstype == UFS1)
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
	else
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred, td)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *ovp = vp;
	struct inode *oip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs2_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	ufs2_daddr_t count, blocksreleased = 0, datablocks;
	struct fs *fs;
	struct buf *bp;
	int needextclean, softdepslowdown, extblocks;
	int offset, size, level, nblocks;
	int i, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	/*
	 * Historically clients did not have to specify which data
	 * they were truncating.  So, if not specified, we assume
	 * traditional behavior, i.e., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;
	/*
	 * If we are truncating the extended attributes, and cannot
	 * do it with soft updates, then do it slowly here.  If we are
	 * truncating both the extended attributes and the file contents
	 * (e.g., the file is being unlinked), then pick it off with
	 * soft updates below.
	 */
	needextclean = 0;
	softdepslowdown = DOINGSOFTDEP(ovp) && softdep_slowdown(ovp);
	extblocks = 0;
	datablocks = DIP(oip, i_blocks);
	if (fs->fs_magic == FS_UFS2_MAGIC && oip->i_din2->di_extsize > 0) {
		extblocks = btodb(fragroundup(fs, oip->i_din2->di_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (DOINGSOFTDEP(ovp) && softdepslowdown == 0 && length == 0) {
			if ((flags & IO_NORMAL) == 0) {
				softdep_setup_freeblocks(oip, length, IO_EXT);
				return (0);
			}
			needextclean = 1;
		} else {
			if (length != 0)
				panic("ffs_truncate: partial trunc of extdata");
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
				return (error);
			osize = oip->i_din2->di_extsize;
			oip->i_din2->di_blocks -= extblocks;
#ifdef QUOTA
			(void) chkdq(oip, -extblocks, NOCRED, 0);
#endif
			vinvalbuf(ovp, V_ALT, cred, td, 0, 0);
			oip->i_din2->di_extsize = 0;
			for (i = 0; i < NXADDR; i++) {
				oldblks[i] = oip->i_din2->di_extb[i];
				oip->i_din2->di_extb[i] = 0;
			}
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			if ((error = ffs_update(ovp, 1)))
				return (error);
			for (i = 0; i < NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(fs, oip->i_devvp, oldblks[i],
				    sblksize(fs, osize, i), oip->i_number);
			}
		}
	}
	if ((flags & IO_NORMAL) == 0)
		return (0);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     datablocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(oip), (u_int)oip->i_size);
		oip->i_size = 0;
		DIP_SET(oip, i_size, 0);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			softdep_setup_freeblocks(oip, length, IO_EXT);
		return (UFS_UPDATE(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			softdep_setup_freeblocks(oip, length, IO_EXT);
		return (UFS_UPDATE(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	if ((oip->i_flags & SF_SNAPSHOT) != 0)
		ffs_snapremove(ovp);
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdepslowdown) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point.  Finding and deallocating those structures
			 * is a lot of work.  Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
				return (error);
			if (oip->i_flag & IN_SPACECOUNTED)
				fs->fs_pendingblocks -= datablocks;
		} else {
#ifdef QUOTA
			(void) chkdq(oip, -datablocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length, needextclean ?
			    IO_EXT | IO_NORMAL : IO_NORMAL);
			vinvalbuf(ovp, needextclean ? 0 : V_NORMAL,
			    cred, td, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
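	/*
	 * Illustrative note on the growth case handled just below: when a
	 * file is extended through truncate (osize < length), ffs_truncate
	 * only asks UFS_BALLOC for the block or fragment containing the new
	 * last byte (offset length - 1); blocks between the old and new end
	 * of file are not allocated and remain holes.  BA_CLRBUF is set so
	 * the returned buffer has valid, cleared contents and the newly
	 * exposed bytes read back as zeroes.
	 */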
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(ovp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		DIP_SET(oip, i_size, length);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.  Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
		DIP_SET(oip, i_size, length);
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(ovp, length - 1, 1, cred, flags, &bp);
		if (error) {
			return (error);
		}
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
			return (error);
		oip->i_size = length;
		DIP_SET(oip, i_size, length);
		size = blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
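	/*
	 * Worked example of the index arithmetic above (illustrative only,
	 * assuming NDADDR == 12 direct blocks and NINDIR(fs) == 2048
	 * pointers per indirect block):
	 *
	 *   - truncating to 0 gives lastblock == -1, so every lastiblock[]
	 *     entry is negative and all direct and indirect blocks are
	 *     released below;
	 *   - truncating so that 22 blocks remain gives lastblock == 21 and
	 *     lastiblock[SINGLE] == 9 (keep the single indirect block and
	 *     its first 10 entries), while lastiblock[DOUBLE] and
	 *     lastiblock[TRIPLE] stay negative and those levels are freed
	 *     entirely.
	 */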
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = DIP(oip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP_SET(oip, i_ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = DIP(oip, i_db[i]);
		if (i > lastblock)
			DIP_SET(oip, i_db[i], 0);
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = UFS_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = DIP(oip, i_db[i]);
		DIP_SET(oip, i_db[i], oldblks[i]);
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = DIP(oip, i_ib[i]);
		DIP_SET(oip, i_ib[i], oldblks[NDADDR + i]);
	}
	oip->i_size = osize;
	DIP_SET(oip, i_size, osize);

	error = vtruncbuf(ovp, cred, td, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(oip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_SET(oip, i_ib[level], 0);
				ffs_blkfree(fs, oip->i_devvp, bn, fs->fs_bsize,
				    oip->i_number);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(oip, i_db[i]);
		if (bn == 0)
			continue;
		DIP_SET(oip, i_db[i], 0);
		bsize = blksize(fs, oip, i);
		ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(oip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		DIP_SET(oip, i_size, length);
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(fs, oip->i_devvp, bn, oldspace - newspace,
			    oip->i_number);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != DIP(oip, i_ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != DIP(oip, i_db[i]))
			panic("ffs_truncate2");
	VI_LOCK(ovp);
	if (length == 0 &&
	    (fs->fs_magic != FS_UFS2_MAGIC || oip->i_din2->di_extsize == 0) &&
	    (vp->v_bufobj.bo_dirty.bv_cnt > 0 ||
	     vp->v_bufobj.bo_clean.bv_cnt > 0))
		panic("ffs_truncate3");
	VI_UNLOCK(ovp);
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	DIP_SET(oip, i_size, length);
	DIP_SET(oip, i_blocks, DIP(oip, i_blocks) - blocksreleased);

	if (DIP(oip, i_blocks) < 0)			/* sanity */
		DIP_SET(oip, i_blocks, 0);
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	ufs2_daddr_t lbn, lastbn;
	ufs2_daddr_t dbn;
	int level;
	ufs2_daddr_t *countp;
{
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	struct vnode *vp;
	caddr_t copy = NULL;
	int i, nblocks, error = 0, allerror = 0;
	ufs2_daddr_t nb, nlbn, last;
	ufs2_daddr_t blkcount, factor, blocksreleased = 0;
	ufs1_daddr_t *bap1 = NULL;
	ufs2_daddr_t *bap2 = NULL;
# define BAP(ip, i) (((ip)->i_ump->um_fstype == UFS1) ? bap1[i] : bap2[i])

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * the double (and triple) indirect blocks are truncated before the
	 * single (and double) indirect blocks, calls to bmap on these
	 * blocks will fail.  However, we already have the on-disk address,
	 * so we have to set the b_blkno field explicitly instead of
	 * letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	if (ip->i_ump->um_fstype == UFS1)
		bap1 = (ufs1_daddr_t *)bp->b_data;
	else
		bap2 = (ufs2_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		MALLOC(copy, caddr_t, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bp->b_data, copy, (u_int)fs->fs_bsize);
		for (i = last + 1; i < NINDIR(fs); i++)
			if (ip->i_ump->um_fstype == UFS1)
				bap1[i] = 0;
			else
				bap2[i] = 0;
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		if (ip->i_ump->um_fstype == UFS1)
			bap1 = (ufs1_daddr_t *)copy;
		else
			bap2 = (ufs2_daddr_t *)copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = BAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs2_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(fs, ip->i_devvp, nb, fs->fs_bsize, ip->i_number);
		blocksreleased += nblocks;
	}

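	/*
	 * Illustrative example of the partial case handled below (assuming
	 * NINDIR(fs) == 2048): for a double indirect block with
	 * lastbn == 5000, factor == 2048 and last == 2, so the loop above
	 * freed entries 2047 down to 3 in their entirety; the code below
	 * then recurses into entry 2 with last = 5000 % 2048 == 904,
	 * keeping blocks 0..904 beneath it.
	 */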
	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = BAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}