/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc(struct inode *, ufs2_daddr_t, ufs2_daddr_t,
	    ufs2_daddr_t, int, ufs2_daddr_t *);

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  The IN_LAZYACCESS flag is set instead of IN_MODIFIED
 * if the filesystem is currently being suspended (or is suspended) and the
 * vnode has been accessed.  If we write now, then clear IN_MODIFIED,
 * IN_LAZYACCESS and IN_LAZYMOD to reflect the presumably successful write,
 * and if waitfor is set, then wait for the write to complete.
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int flags, error;

	ASSERT_VOP_ELOCKED(vp, "ffs_update");
	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly && ip->i_ump->um_fsckpid == 0)
		return (0);
	/*
	 * If we are updating a snapshot and another process is currently
	 * writing the buffer containing the inode for this snapshot then
	 * a deadlock can occur when it tries to check the snapshot to see
	 * if that block needs to be copied.  Thus when updating a snapshot
	 * we check to see if the buffer is already locked, and if it is
	 * we drop the snapshot lock until the buffer has been written
	 * and is available to us.  We have to grab a reference to the
	 * snapshot vnode to prevent it from being removed while we are
	 * waiting for the buffer.
	 */
	flags = 0;
	if (IS_SNAPSHOT(ip))
		flags = GB_LOCK_NOWAIT;
loop:
	error = breadn_flags(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int) fs->fs_bsize, 0, 0, 0, NOCRED, flags, &bp);
	if (error != 0) {
		if (error != EBUSY)
			return (error);
		KASSERT((IS_SNAPSHOT(ip)), ("EBUSY from non-snapshot"));
		/*
		 * Wait for our inode block to become available.
		 *
		 * Hold a reference to the vnode to protect against
		 * ffs_snapgone().  Since we hold a reference, it can only
		 * get reclaimed (VI_DOOMED flag) in a forcible downgrade
		 * or unmount.  For an unmount, the entire filesystem will be
		 * gone, so we cannot attempt to touch anything associated
		 * with it while the vnode is unlocked; all we can do is
		 * pause briefly and try again.  If when we relock the vnode
		 * we discover that it has been reclaimed, updating it is no
		 * longer necessary and we can just return an error.
		 */
		vref(vp);
		VOP_UNLOCK(vp, 0);
		pause("ffsupd", 1);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vrele(vp);
		if ((vp->v_iflag & VI_DOOMED) != 0)
			return (ENOENT);
		goto loop;
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	if (ip->i_ump->um_fstype == UFS1) {
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
		/*
		 * XXX: FIX? The entropy here is desirable,
		 * but the harvesting may be expensive
		 */
		random_harvest_queue(&(ip->i_din1), sizeof(ip->i_din1), 1,
		    RANDOM_FS_ATIME);
	} else {
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
		/*
		 * XXX: FIX? The entropy here is desirable,
		 * but the harvesting may be expensive
		 */
		random_harvest_queue(&(ip->i_din2), sizeof(ip->i_din2), 1,
		    RANDOM_FS_ATIME);
	}
	if (waitfor && !DOINGASYNC(vp))
		error = bwrite(bp);
	else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		bawrite(bp);
		error = 0;
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		error = 0;
	}
	return (error);
}

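/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * code that has modified an inode marks which timestamps need updating
 * and then pushes the inode to disk, waiting only on non-async mounts:
 *
 *	ip->i_flag |= IN_CHANGE | IN_UPDATE;
 *	error = ffs_update(vp, !DOINGASYNC(vp));
 *
 * ffs_truncate() below follows exactly this pattern after it adjusts
 * the inode size and block pointers.
 */
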
#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
{
	struct inode *ip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs2_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	ufs2_daddr_t count, blocksreleased = 0, datablocks, blkno;
	struct bufobj *bo;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	int softdeptrunc, journaltrunc;
	int needextclean, extblocks;
	int offset, size, level, nblocks;
	int i, error, allerror, indiroff;
	off_t osize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	bo = &vp->v_bufobj;

	ASSERT_VOP_LOCKED(vp, "ffs_truncate");

	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
#ifdef QUOTA
	error = getinoquota(ip);
	if (error)
		return (error);
#endif
	/*
	 * Historically clients did not have to specify which data
	 * they were truncating.  So, if not specified, we assume
	 * traditional behavior, e.g., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;
	if (!DOINGSOFTDEP(vp) && !DOINGASYNC(vp))
		flags |= IO_SYNC;
	/*
	 * If we are truncating the extended attributes, and cannot
	 * do it with soft updates, then do it slowly here.  If we are
	 * truncating both the extended attributes and the file contents
	 * (e.g., the file is being unlinked), then pick it off with
	 * soft updates below.
	 */
	allerror = 0;
	needextclean = 0;
	softdeptrunc = 0;
	journaltrunc = DOINGSUJ(vp);
	if (journaltrunc == 0 && DOINGSOFTDEP(vp) && length == 0)
		softdeptrunc = !softdep_slowdown(vp);
	extblocks = 0;
	datablocks = DIP(ip, i_blocks);
	if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) {
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (length != 0)
			panic("ffs_truncate: partial trunc of extdata");
		if (softdeptrunc || journaltrunc) {
			if ((flags & IO_NORMAL) == 0)
				goto extclean;
			needextclean = 1;
		} else {
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
#ifdef QUOTA
			(void) chkdq(ip, -extblocks, NOCRED, 0);
#endif
			vinvalbuf(vp, V_ALT, 0, 0);
			vn_pages_remove(vp,
			    OFF_TO_IDX(lblktosize(fs, -extblocks)), 0);
			osize = ip->i_din2->di_extsize;
			ip->i_din2->di_blocks -= extblocks;
			ip->i_din2->di_extsize = 0;
			for (i = 0; i < NXADDR; i++) {
				oldblks[i] = ip->i_din2->di_extb[i];
				ip->i_din2->di_extb[i] = 0;
			}
			ip->i_flag |= IN_CHANGE;
			if ((error = ffs_update(vp, !DOINGASYNC(vp))))
				return (error);
			for (i = 0; i < NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(ump, fs, ip->i_devvp, oldblks[i],
				    sblksize(fs, osize, i), ip->i_number,
				    vp->v_type, NULL);
			}
		}
	}
	if ((flags & IO_NORMAL) == 0)
		return (0);
	if (vp->v_type == VLNK &&
	    (ip->i_size < vp->v_mount->mnt_maxsymlinklen ||
	     datablocks == 0)) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(ip), (u_int)ip->i_size);
		ip->i_size = 0;
		DIP_SET(ip, i_size, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, !DOINGASYNC(vp)));
	}
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
	if (IS_SNAPSHOT(ip))
		ffs_snapremove(vp);
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	osize = ip->i_size;
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(vp, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(vp))
			bdwrite(bp);
		else
			bawrite(bp);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(vp, !DOINGASYNC(vp)));
	}
	/*
	 * Lookup block number for a given offset.  Zero length files
	 * have no blocks, so return a blkno of -1.
	 */
	lbn = lblkno(fs, length - 1);
	if (length == 0) {
		blkno = -1;
	} else if (lbn < NDADDR) {
		blkno = DIP(ip, i_db[lbn]);
	} else {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize,
		    cred, BA_METAONLY, &bp);
		if (error)
			return (error);
		indiroff = (lbn - NDADDR) % NINDIR(fs);
		if (ip->i_ump->um_fstype == UFS1)
			blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff];
		else
			blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff];
		/*
		 * If the block number is non-zero, then the indirect block
		 * must have been previously allocated and need not be written.
		 * If the block number is zero, then we may have allocated
		 * the indirect block and hence need to write it out.
		 */
		if (blkno != 0)
			brelse(bp);
		else if (DOINGSOFTDEP(vp) || DOINGASYNC(vp))
			bdwrite(bp);
		else
			bwrite(bp);
	}
	/*
	 * If the block number at the new end of the file is zero,
	 * then we must allocate it to ensure that the last block of
	 * the file is allocated.  Soft updates does not handle this
	 * case, so here we have to clean up the soft updates data
	 * structures describing the allocation past the truncation
	 * point.  Finding and deallocating those structures is a lot of
	 * work.  Since partial truncation with a hole at the end occurs
	 * rarely, we solve the problem by syncing the file so that it
	 * will have no soft updates data structures left.
	 */
	if (blkno == 0 && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
		return (error);
	if (blkno != 0 && DOINGSOFTDEP(vp)) {
		if (softdeptrunc == 0 && journaltrunc == 0) {
			/*
			 * If soft updates cannot handle this truncation,
			 * clean up soft dependency data structures and
			 * fall through to the synchronous truncation.
			 */
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
			flags = IO_NORMAL | (needextclean ? IO_EXT: 0);
			if (journaltrunc)
				softdep_journal_freeblocks(ip, cred, length,
				    flags);
			else
				softdep_setup_freeblocks(ip, length, flags);
			ASSERT_VOP_LOCKED(vp, "ffs_truncate1");
			if (journaltrunc == 0) {
				ip->i_flag |= IN_CHANGE | IN_UPDATE;
				error = ffs_update(vp, 0);
			}
			return (error);
		}
	}
	/*
	 * Shorten the size of the file.  If the last block of the
	 * shortened file is unallocated, we must allocate it.
	 * Additionally, if the file is not being truncated to a
	 * block boundary, the contents of the partial block
	 * following the end of the file must be zero'ed in
	 * case it ever becomes accessible again because of
	 * subsequent file growth.  Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
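	/*
	 * A worked example of the partial-block case (illustrative
	 * numbers, assuming 8K blocks and 1K fragments): truncating a
	 * 20K file to length 4600 gives offset = 4600 within the last
	 * block; blksize() rounds the new size up to five fragments
	 * (5120 bytes), so the bzero() below clears bytes 4600-5119 of
	 * the buffer and allocbuf() trims it to 5120 bytes.
	 */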
	offset = blkoff(fs, length);
	if (blkno != 0 && offset == 0) {
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(vp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
			return (error);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		size = blksize(fs, ip, lbn);
		if (vp->v_type != VDIR && offset != 0)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(vp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
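	/*
	 * A quick check of the index arithmetic above (illustrative,
	 * assuming UFS2 with 8K blocks, so NDADDR is 12 and NINDIR(fs)
	 * is 1024): truncating to length 0 gives lastblock = -1, so
	 * lastiblock[SINGLE] = -13 and the DOUBLE/TRIPLE values are
	 * more negative still; every direct block and indirect tree is
	 * released.  Truncating to one full block gives lastblock = 0,
	 * keeping only the first direct block.
	 */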
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = DIP(ip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP_SET(ip, i_ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = DIP(ip, i_db[i]);
		if (i > lastblock)
			DIP_SET(ip, i_db[i], 0);
	}
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ffs_update(vp, !DOINGASYNC(vp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = DIP(ip, i_db[i]);
		DIP_SET(ip, i_db[i], oldblks[i]);
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = DIP(ip, i_ib[i]);
		DIP_SET(ip, i_ib[i], oldblks[NDADDR + i]);
	}
	ip->i_size = osize;
	DIP_SET(ip, i_size, osize);

	error = vtruncbuf(vp, cred, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(ip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(ip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_SET(ip, i_ib[level], 0);
				ffs_blkfree(ump, fs, ip->i_devvp, bn,
				    fs->fs_bsize, ip->i_number,
				    vp->v_type, NULL);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(ip, i_db[i]);
		if (bn == 0)
			continue;
		DIP_SET(ip, i_db[i], 0);
		bsize = blksize(fs, ip, i);
		ffs_blkfree(ump, fs, ip->i_devvp, bn, bsize, ip->i_number,
		    vp->v_type, NULL);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(ip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(ump, fs, ip->i_devvp, bn,
			    oldspace - newspace, ip->i_number, vp->v_type,
			    NULL);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != DIP(ip, i_ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != DIP(ip, i_db[i]))
			panic("ffs_truncate2");
	BO_LOCK(bo);
	if (length == 0 &&
	    (fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("ffs_truncate3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	ip->i_size = length;
	DIP_SET(ip, i_size, length);
	if (DIP(ip, i_blocks) >= blocksreleased)
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - blocksreleased);
	else	/* sanity */
		DIP_SET(ip, i_blocks, 0);
	ip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(ip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);

extclean:
	if (journaltrunc)
		softdep_journal_freeblocks(ip, cred, length, IO_EXT);
	else
		softdep_setup_freeblocks(ip, length, IO_EXT);
	return (ffs_update(vp, !DOINGASYNC(vp)));
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	ufs2_daddr_t lbn, lastbn;
	ufs2_daddr_t dbn;
	int level;
	ufs2_daddr_t *countp;
{
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	struct vnode *vp;
	caddr_t copy = NULL;
	int i, nblocks, error = 0, allerror = 0;
	ufs2_daddr_t nb, nlbn, last;
	ufs2_daddr_t blkcount, factor, blocksreleased = 0;
	ufs1_daddr_t *bap1 = NULL;
	ufs2_daddr_t *bap2 = NULL;
#define	BAP(ip, i) (((ip)->i_ump->um_fstype == UFS1) ? bap1[i] : bap2[i])

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = lbn_offset(fs, level);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * the double (or triple) indirect block is released before the
	 * single (or double) indirect blocks it points to, calls to bmap
	 * on those blocks will fail.  However, we already have the on
	 * disk address, so we have to set the b_blkno field explicitly
	 * instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		curthread->td_ru.ru_inblock++;	/* pay for read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	if (ip->i_ump->um_fstype == UFS1)
		bap1 = (ufs1_daddr_t *)bp->b_data;
	else
		bap2 = (ufs2_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		copy = malloc(fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bp->b_data, copy, (u_int)fs->fs_bsize);
		for (i = last + 1; i < NINDIR(fs); i++)
			if (ip->i_ump->um_fstype == UFS1)
				bap1[i] = 0;
			else
				bap2[i] = 0;
		if (DOINGASYNC(vp)) {
			bdwrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		if (ip->i_ump->um_fstype == UFS1)
			bap1 = (ufs1_daddr_t *)copy;
		else
			bap2 = (ufs2_daddr_t *)copy;
	}

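	/*
	 * Note on ordering (derived from the comment and code above):
	 * when only part of this indirect block survives, the on-disk
	 * copy is written first with the dead pointers zeroed, and the
	 * freeing below then walks the private copy.  A crash after
	 * that write therefore leaves no on-disk pointers to blocks
	 * that have been (or are about to be) freed.
	 */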
	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = BAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs2_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip->i_ump, fs, ip->i_devvp, nb, fs->fs_bsize,
		    ip->i_number, vp->v_type, NULL);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = BAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		free(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}

int
ffs_rdonly(struct inode *ip)
{

	return (ip->i_ump->um_fs->fs_ronly != 0);
}