/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 * $FreeBSD$
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc(struct inode *, ufs2_daddr_t, ufs2_daddr_t,
	    ufs2_daddr_t, int, ufs2_daddr_t *);

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
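 *
 * A minimal caller sketch (illustrative only; the truncation code below
 * does the same thing before each UFS_UPDATE() call): mark the in-core
 * inode dirty, then request the write:
 *
 *	VTOI(vp)->i_flag |= IN_CHANGE | IN_UPDATE;
 *	error = ffs_update(vp, 1);
 *
 * where a nonzero waitfor asks for a synchronous write of the inode block.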
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly)
		return (0);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC &&		/* XXX */
	    fs->fs_old_inodefmt < FS_44INODEFMT) {	/* XXX */
		ip->i_din1->di_ouid = ip->i_uid;	/* XXX */
		ip->i_din1->di_ogid = ip->i_gid;	/* XXX */
	}						/* XXX */
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	if (ip->i_ump->um_fstype == UFS1)
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
	else
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred, td)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *ovp = vp;
	struct inode *oip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs2_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	ufs2_daddr_t count, blocksreleased = 0, datablocks;
	struct fs *fs;
	struct buf *bp;
	int needextclean, softdepslowdown, extblocks;
	int offset, size, level, nblocks;
	int i, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	/*
	 * Historically clients did not have to specify which data
	 * they were truncating. So, if not specified, we assume
	 * traditional behavior, e.g., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;
	/*
	 * If we are truncating the extended-attributes, and cannot
	 * do it with soft updates, then do it slowly here. If we are
	 * truncating both the extended attributes and the file contents
	 * (e.g., the file is being unlinked), then pick it off with
	 * soft updates below.
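	 * In that case needextclean is set, recording that the
	 * extended-attribute blocks still have to be handed to
	 * softdep_setup_freeblocks() once the normal data has been
	 * dealt with.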
	 */
	needextclean = 0;
	softdepslowdown = DOINGSOFTDEP(ovp) && softdep_slowdown(ovp);
	extblocks = 0;
	datablocks = DIP(oip, i_blocks);
	if (fs->fs_magic == FS_UFS2_MAGIC && oip->i_din2->di_extsize > 0) {
		extblocks = btodb(fragroundup(fs, oip->i_din2->di_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (DOINGSOFTDEP(ovp) && softdepslowdown == 0 && length == 0) {
			if ((flags & IO_NORMAL) == 0) {
				softdep_setup_freeblocks(oip, length, IO_EXT);
				return (0);
			}
			needextclean = 1;
		} else {
			if (length != 0)
				panic("ffs_truncate: partial trunc of extdata");
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
				return (error);
			osize = oip->i_din2->di_extsize;
			oip->i_din2->di_blocks -= extblocks;
#ifdef QUOTA
			(void) chkdq(oip, -extblocks, NOCRED, 0);
#endif
			vinvalbuf(ovp, V_ALT, cred, td, 0, 0);
			oip->i_din2->di_extsize = 0;
			for (i = 0; i < NXADDR; i++) {
				oldblks[i] = oip->i_din2->di_extb[i];
				oip->i_din2->di_extb[i] = 0;
			}
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			if ((error = ffs_update(ovp, 1)))
				return (error);
			for (i = 0; i < NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(fs, oip->i_devvp, oldblks[i],
				    sblksize(fs, osize, i), oip->i_number);
			}
		}
	}
	if ((flags & IO_NORMAL) == 0)
		return (0);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     datablocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(oip), (u_int)oip->i_size);
		oip->i_size = 0;
		DIP(oip, i_size) = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			softdep_setup_freeblocks(oip, length, IO_EXT);
		return (UFS_UPDATE(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			softdep_setup_freeblocks(oip, length, IO_EXT);
		return (UFS_UPDATE(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	if ((oip->i_flags & SF_SNAPSHOT) != 0)
		ffs_snapremove(ovp);
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdepslowdown) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
				return (error);
			if (oip->i_flag & IN_SPACECOUNTED)
				fs->fs_pendingblocks -= datablocks;
		} else {
#ifdef QUOTA
			(void) chkdq(oip, -datablocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length, needextclean ?
			    IO_EXT | IO_NORMAL : IO_NORMAL);
			vinvalbuf(ovp, needextclean ? 0 : V_NORMAL,
			    cred, td, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated.
	 * Since the smallest value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(ovp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		DIP(oip, i_size) = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
		DIP(oip, i_size) = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(ovp, length - 1, 1, cred, flags, &bp);
		if (error) {
			return (error);
		}
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
			return (error);
		oip->i_size = length;
		DIP(oip, i_size) = length;
		size = blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep. Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks. If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list. lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = DIP(oip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP(oip, i_ib[level]) = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = DIP(oip, i_db[i]);
		if (i > lastblock)
			DIP(oip, i_db[i]) = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = UFS_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
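	 * Restoring the old pointers in the in-core inode lets the
	 * freeing loops below find the blocks being released, while the
	 * on-disk inode written above already shows them as gone; if we
	 * crash in between, the worst case is blocks missing from the
	 * free list, never blocks claimed by two files.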
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = DIP(oip, i_db[i]);
		DIP(oip, i_db[i]) = oldblks[i];
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = DIP(oip, i_ib[i]);
		DIP(oip, i_ib[i]) = oldblks[NDADDR + i];
	}
	oip->i_size = osize;
	DIP(oip, i_size) = osize;

	error = vtruncbuf(ovp, cred, td, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(oip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP(oip, i_ib[level]) = 0;
				ffs_blkfree(fs, oip->i_devvp, bn, fs->fs_bsize,
				    oip->i_number);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(oip, i_db[i]);
		if (bn == 0)
			continue;
		DIP(oip, i_db[i]) = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(oip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		DIP(oip, i_size) = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(fs, oip->i_devvp, bn, oldspace - newspace,
			    oip->i_number);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != DIP(oip, i_ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != DIP(oip, i_db[i]))
			panic("ffs_truncate2");
	VI_LOCK(ovp);
	if (length == 0 &&
	    (fs->fs_magic != FS_UFS2_MAGIC || oip->i_din2->di_extsize == 0) &&
	    (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
		panic("ffs_truncate3");
	VI_UNLOCK(ovp);
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	DIP(oip, i_size) = length;
	DIP(oip, i_blocks) -= blocksreleased;

	if (DIP(oip, i_blocks) < 0)			/* sanity */
		DIP(oip, i_blocks) = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.
 * If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	ufs2_daddr_t lbn, lastbn;
	ufs2_daddr_t dbn;
	int level;
	ufs2_daddr_t *countp;
{
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	struct vnode *vp;
	caddr_t copy = NULL;
	int i, nblocks, error = 0, allerror = 0;
	ufs2_daddr_t nb, nlbn, last;
	ufs2_daddr_t blkcount, factor, blocksreleased = 0;
	ufs1_daddr_t *bap1 = NULL;
	ufs2_daddr_t *bap2 = NULL;
# define BAP(ip, i) (((ip)->i_ump->um_fstype == UFS1) ? bap1[i] : bap2[i])

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp->b_vp, bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	if (ip->i_ump->um_fstype == UFS1)
		bap1 = (ufs1_daddr_t *)bp->b_data;
	else
		bap2 = (ufs2_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		MALLOC(copy, caddr_t, fs->fs_bsize, M_TEMP, 0);
		bcopy((caddr_t)bp->b_data, copy, (u_int)fs->fs_bsize);
		for (i = last + 1; i < NINDIR(fs); i++)
			BAP(ip, i) = 0;
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		if (ip->i_ump->um_fstype == UFS1)
			bap1 = (ufs1_daddr_t *)copy;
		else
			bap2 = (ufs2_daddr_t *)copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = BAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs2_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(fs, ip->i_devvp, nb, fs->fs_bsize, ip->i_number);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
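	 * The loop above stops with i at the entry for the partially
	 * retained indirect block (i == last), so one more recursion
	 * with the residual cut-off (lastbn % factor) trims just that
	 * subtree.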
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = BAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}