/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ufs.h"
#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#ifdef UFS_DIRHASH
#include <ufs/ufs/dirhash.h>
#endif
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc(struct inode *, ufs2_daddr_t, ufs2_daddr_t,
	    ufs2_daddr_t, int, ufs2_daddr_t *);

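/*
 * Write the buffer bp using the write policy requested by the caller:
 * synchronously for IO_SYNC, as a delayed write when the mount is
 * marked async, and asynchronously otherwise.
 */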
static void
ffs_inode_bwrite(struct vnode *vp, struct buf *bp, int flags)
{
	if ((flags & IO_SYNC) != 0)
		bwrite(bp);
	else if (DOINGASYNC(vp))
		bdwrite(bp);
	else
		bawrite(bp);
}

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  The IN_LAZYACCESS flag is set instead of IN_MODIFIED
 * if the fs is currently being suspended (or is suspended) and the vnode
 * has been accessed.  If we write now, then clear IN_MODIFIED,
 * IN_LAZYACCESS and IN_LAZYMOD to reflect the presumably successful write,
 * and if waitfor is set, then wait for the write to complete.
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	daddr_t bn;
	int flags, error;

	ASSERT_VOP_ELOCKED(vp, "ffs_update");
	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	/*
	 * The IN_SIZEMOD and IN_IBLKDATA flags indicate changes to the
	 * file size and block pointer fields in the inode.  When these
	 * fields have been changed, the fsync() and fsyncdata() system
	 * calls must write the inode to ensure their semantics that the
	 * file is on stable store.
	 *
	 * The IN_SIZEMOD and IN_IBLKDATA flags cannot be cleared until
	 * a synchronous write of the inode is done.  If they are cleared
	 * on an asynchronous write, then the inode may not yet have been
	 * written to the disk when an fsync() or fsyncdata() call is done.
	 * Absent these flags, these calls would not know that they needed
	 * to write the inode.  Thus, these flags only can be cleared on
	 * synchronous writes of the inode.  Since the inode will be locked
	 * for the duration of the I/O that writes it to disk, no fsync()
	 * or fsyncdata() will be able to run before the on-disk inode
	 * is complete.
	 */
	if (waitfor)
		ip->i_flag &= ~(IN_SIZEMOD | IN_IBLKDATA);
	fs = ITOFS(ip);
	if (fs->fs_ronly && ITOUMP(ip)->um_fsckpid == 0)
		return (0);
	/*
	 * If we are updating a snapshot and another process is currently
	 * writing the buffer containing the inode for this snapshot, then
	 * a deadlock can occur when it tries to check the snapshot to see
	 * if that block needs to be copied.  Thus when updating a snapshot
	 * we check to see if the buffer is already locked, and if it is
	 * we drop the snapshot lock until the buffer has been written
	 * and is available to us.  We have to grab a reference to the
	 * snapshot vnode to prevent it from being removed while we are
	 * waiting for the buffer.
	 */
loop:
	flags = 0;
	if (IS_SNAPSHOT(ip))
		flags = GB_LOCK_NOWAIT;
	bn = fsbtodb(fs, ino_to_fsba(fs, ip->i_number));
	error = ffs_breadz(VFSTOUFS(vp->v_mount), ITODEVVP(ip), bn, bn,
	    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, flags, NULL, &bp);
	if (error != 0) {
		/*
		 * If EBUSY was returned without GB_LOCK_NOWAIT (which
		 * requests trylock for buffer lock), it is for some
		 * other reason and we should not handle it specially.
		 */
		if (error != EBUSY || (flags & GB_LOCK_NOWAIT) == 0)
			return (error);

		/*
		 * Wait for our inode block to become available.
		 *
		 * Hold a reference to the vnode to protect against
		 * ffs_snapgone().  Since we hold a reference, it can only
		 * get reclaimed (VIRF_DOOMED flag) in a forcible downgrade
		 * or unmount.  For an unmount, the entire filesystem will be
		 * gone, so we cannot attempt to touch anything associated
		 * with it while the vnode is unlocked; all we can do is
		 * pause briefly and try again.  If when we relock the vnode
		 * we discover that it has been reclaimed, updating it is no
		 * longer necessary and we can just return an error.
		 */
		vref(vp);
		VOP_UNLOCK(vp);
		pause("ffsupd", 1);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vrele(vp);
		if (VN_IS_DOOMED(vp))
			return (ENOENT);

		/*
		 * Recalculate flags, because the vnode was relocked and
		 * may no longer be a snapshot.
		 */
		goto loop;
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	if (I_IS_UFS1(ip)) {
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
		/*
		 * XXX: FIX? The entropy here is desirable,
		 * but the harvesting may be expensive.
		 */
		random_harvest_queue(&(ip->i_din1), sizeof(ip->i_din1),
		    RANDOM_FS_ATIME);
	} else {
		ffs_update_dinode_ckhash(fs, ip->i_din2);
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
		/*
		 * XXX: FIX? The entropy here is desirable,
		 * but the harvesting may be expensive.
		 */
		random_harvest_queue(&(ip->i_din2), sizeof(ip->i_din2),
		    RANDOM_FS_ATIME);
	}
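	/*
	 * Write out the inode block.  A waited-for update commits it
	 * synchronously with bwrite(); otherwise prefer a delayed write,
	 * falling back to an immediate asynchronous write when free pages
	 * are scarce or too many buffers are already dirty.
	 */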
	if (waitfor) {
		error = bwrite(bp);
		if (ffs_fsfail_cleanup(VFSTOUFS(vp->v_mount), error))
			error = 0;
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		bawrite(bp);
		error = 0;
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		error = 0;
	}
	return (error);
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
{
	struct inode *ip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[UFS_NIADDR];
	ufs2_daddr_t indir_lbn[UFS_NIADDR], oldblks[UFS_NDADDR + UFS_NIADDR];
	ufs2_daddr_t newblks[UFS_NDADDR + UFS_NIADDR];
	ufs2_daddr_t count, blocksreleased = 0, datablocks, blkno;
	struct bufobj *bo;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	int softdeptrunc, journaltrunc;
	int needextclean, extblocks;
	int offset, size, level, nblocks;
	int i, error, allerror, indiroff, waitforupdate;
	u_long key;
	off_t osize;

	ip = VTOI(vp);
	ump = VFSTOUFS(vp->v_mount);
	fs = ump->um_fs;
	bo = &vp->v_bufobj;

	ASSERT_VOP_LOCKED(vp, "ffs_truncate");

	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
#ifdef QUOTA
	error = getinoquota(ip);
	if (error)
		return (error);
#endif
	/*
	 * Historically clients did not have to specify which data
	 * they were truncating.  So, if not specified, we assume
	 * traditional behavior, e.g., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;
	if (!DOINGSOFTDEP(vp) && !DOINGASYNC(vp))
		flags |= IO_SYNC;
	waitforupdate = (flags & IO_SYNC) != 0 || !DOINGASYNC(vp);
	/*
	 * If we are truncating the extended attributes, and cannot
	 * do it with soft updates, then do it slowly here.  If we are
	 * truncating both the extended attributes and the file contents
	 * (e.g., the file is being unlinked), then pick it off with
	 * soft updates below.
	 */
	allerror = 0;
	needextclean = 0;
	softdeptrunc = 0;
	journaltrunc = DOINGSUJ(vp);
	journaltrunc = 0;	/* XXX temp patch until bug found */
	if (journaltrunc == 0 && DOINGSOFTDEP(vp) && length == 0)
		softdeptrunc = !softdep_slowdown(vp);
	extblocks = 0;
	datablocks = DIP(ip, i_blocks);
	if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) {
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (length != 0)
			panic("ffs_truncate: partial trunc of extdata");
		if (softdeptrunc || journaltrunc) {
			if ((flags & IO_NORMAL) == 0)
				goto extclean;
			needextclean = 1;
		} else {
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
#ifdef QUOTA
			(void) chkdq(ip, -extblocks, NOCRED, FORCE);
#endif
			vinvalbuf(vp, V_ALT, 0, 0);
			vn_pages_remove(vp,
			    OFF_TO_IDX(lblktosize(fs, -extblocks)), 0);
			osize = ip->i_din2->di_extsize;
			ip->i_din2->di_blocks -= extblocks;
			ip->i_din2->di_extsize = 0;
			for (i = 0; i < UFS_NXADDR; i++) {
				oldblks[i] = ip->i_din2->di_extb[i];
				ip->i_din2->di_extb[i] = 0;
			}
			UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
			if ((error = ffs_update(vp, waitforupdate)))
				return (error);
			for (i = 0; i < UFS_NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(ump, fs, ITODEVVP(ip), oldblks[i],
				    sblksize(fs, osize, i), ip->i_number,
				    vp->v_type, NULL, SINGLETON_KEY);
			}
		}
	}
	if ((flags & IO_NORMAL) == 0)
		return (0);
	if (vp->v_type == VLNK && ip->i_size < ump->um_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(ip), (u_int)ip->i_size);
		ip->i_size = 0;
		DIP_SET(ip, i_size, 0);
		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, waitforupdate));
	}
	if (ip->i_size == length) {
		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
	if (IS_SNAPSHOT(ip))
		ffs_snapremove(vp);
	cluster_init_vn(&ip->i_clusterw);
	osize = ip->i_size;
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(vp, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		ffs_inode_bwrite(vp, bp, flags);
		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
		return (ffs_update(vp, waitforupdate));
	}
	/*
	 * Lookup block number for a given offset.  Zero length files
	 * have no blocks, so return a blkno of -1.
	 */
	lbn = lblkno(fs, length - 1);
	if (length == 0) {
		blkno = -1;
	} else if (lbn < UFS_NDADDR) {
		blkno = DIP(ip, i_db[lbn]);
	} else {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, cred, BA_METAONLY, &bp);
		if (error)
			return (error);
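		/*
		 * indiroff is the offset of lbn's pointer within the
		 * lowest-level indirect block fetched above.  For example,
		 * assuming an 8K-block UFS2 filesystem (UFS_NDADDR == 12,
		 * NINDIR(fs) == 1024), lbn 12 maps to entry 0 of the
		 * single indirect block.
		 */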
		indiroff = (lbn - UFS_NDADDR) % NINDIR(fs);
		if (I_IS_UFS1(ip))
			blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff];
		else
			blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff];
		/*
		 * If the block number is non-zero, then the indirect block
		 * must have been previously allocated and need not be
		 * written.  If the block number is zero, then we may have
		 * allocated the indirect block and hence need to write
		 * it out.
		 */
		if (blkno != 0)
			brelse(bp);
		else if (flags & IO_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);
	}
	/*
	 * If the block number at the new end of the file is zero,
	 * then we must allocate it to ensure that the last block of
	 * the file is allocated.  Soft updates does not handle this
	 * case, so here we have to clean up the soft updates data
	 * structures describing the allocation past the truncation
	 * point.  Finding and deallocating those structures is a lot of
	 * work.  Since partial truncation with a hole at the end occurs
	 * rarely, we solve the problem by syncing the file so that it
	 * will have no soft updates data structures left.
	 */
	if (blkno == 0 && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
		return (error);
	if (blkno != 0 && DOINGSOFTDEP(vp)) {
		if (softdeptrunc == 0 && journaltrunc == 0) {
			/*
			 * If soft updates cannot handle this truncation,
			 * clean up soft dependency data structures and
			 * fall through to the synchronous truncation.
			 */
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
			flags = IO_NORMAL | (needextclean ? IO_EXT : 0);
			if (journaltrunc)
				softdep_journal_freeblocks(ip, cred, length,
				    flags);
			else
				softdep_setup_freeblocks(ip, length, flags);
			ASSERT_VOP_LOCKED(vp, "ffs_truncate1");
			if (journaltrunc == 0) {
				UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
				error = ffs_update(vp, 0);
			}
			return (error);
		}
	}
	/*
	 * Shorten the size of the file.  If the last block of the
	 * shortened file is unallocated, we must allocate it.
	 * Additionally, if the file is not being truncated to a
	 * block boundary, the contents of the partial block
	 * following the end of the file must be zero'ed in
	 * case it ever becomes accessible again because of
	 * subsequent file growth.  Directories, however, are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (blkno != 0 && offset == 0) {
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
#ifdef UFS_DIRHASH
		if (vp->v_type == VDIR && ip->i_dirhash != NULL)
			ufsdirhash_dirtrunc(ip, length);
#endif
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		ffs_inode_bwrite(vp, bp, flags);

		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(vp) && lbn < UFS_NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
			return (error);

		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
#ifdef UFS_DIRHASH
		if (vp->v_type == VDIR && ip->i_dirhash != NULL)
			ufsdirhash_dirtrunc(ip, length);
#endif
		size = blksize(fs, ip, lbn);
		if (vp->v_type != VDIR && offset != 0)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		ffs_inode_bwrite(vp, bp, flags);
		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - UFS_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
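	/*
	 * For example, truncating to length 0 yields lastblock == -1 and
	 * all three lastiblock entries negative, so every direct and
	 * indirect block is released below.
	 */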
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[UFS_NDADDR + level] = DIP(ip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP_SET(ip, i_ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < UFS_NDADDR; i++) {
		oldblks[i] = DIP(ip, i_db[i]);
		if (i > lastblock)
			DIP_SET(ip, i_db[i], 0);
	}
	UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	allerror = ffs_update(vp, waitforupdate);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < UFS_NDADDR; i++) {
		newblks[i] = DIP(ip, i_db[i]);
		DIP_SET(ip, i_db[i], oldblks[i]);
	}
	for (i = 0; i < UFS_NIADDR; i++) {
		newblks[UFS_NDADDR + i] = DIP(ip, i_ib[i]);
		DIP_SET(ip, i_ib[i], oldblks[UFS_NDADDR + i]);
	}
	ip->i_size = osize;
	DIP_SET(ip, i_size, osize);
	UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);

	error = vtruncbuf(vp, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -UFS_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(ip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(ip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_SET(ip, i_ib[level], 0);
				ffs_blkfree(ump, fs, ump->um_devvp, bn,
				    fs->fs_bsize, ip->i_number,
				    vp->v_type, NULL, SINGLETON_KEY);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	key = ffs_blkrelease_start(ump, ump->um_devvp, ip->i_number);
	for (i = UFS_NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(ip, i_db[i]);
		if (bn == 0)
			continue;
		DIP_SET(ip, i_db[i], 0);
		bsize = blksize(fs, ip, i);
		ffs_blkfree(ump, fs, ump->um_devvp, bn, bsize, ip->i_number,
		    vp->v_type, NULL, key);
		blocksreleased += btodb(bsize);
	}
	ffs_blkrelease_finish(ump, key);
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(ip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(ump, fs, ump->um_devvp, bn,
			    oldspace - newspace, ip->i_number, vp->v_type,
			    NULL, SINGLETON_KEY);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[UFS_NDADDR + level] != DIP(ip, i_ib[level]))
			panic("ffs_truncate1: level %d newblks %jd != i_ib %jd",
			    level, (intmax_t)newblks[UFS_NDADDR + level],
			    (intmax_t)DIP(ip, i_ib[level]));
	for (i = 0; i < UFS_NDADDR; i++)
		if (newblks[i] != DIP(ip, i_db[i]))
			panic("ffs_truncate2: blkno %d newblks %jd != i_db %jd",
			    i, (intmax_t)newblks[i],
			    (intmax_t)DIP(ip, i_db[i]));
	BO_LOCK(bo);
	if (length == 0 &&
	    (fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("ffs_truncate3: vp = %p, buffers: dirty = %d, clean = %d",
		    vp, bo->bo_dirty.bv_cnt, bo->bo_clean.bv_cnt);
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	ip->i_size = length;
	DIP_SET(ip, i_size, length);
	if (DIP(ip, i_blocks) >= blocksreleased)
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - blocksreleased);
	else	/* sanity */
		DIP_SET(ip, i_blocks, 0);
	UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
#ifdef QUOTA
	(void) chkdq(ip, -blocksreleased, NOCRED, FORCE);
#endif
	return (allerror);

extclean:
	if (journaltrunc)
		softdep_journal_freeblocks(ip, cred, length, IO_EXT);
	else
		softdep_setup_freeblocks(ip, length, IO_EXT);
	return (ffs_update(vp, waitforupdate));
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	ufs2_daddr_t lbn, lastbn;
	ufs2_daddr_t dbn;
	int level;
	ufs2_daddr_t *countp;
{
	struct buf *bp;
	struct fs *fs;
	struct ufsmount *ump;
	struct vnode *vp;
	caddr_t copy = NULL;
	u_long key;
	int i, nblocks, error = 0, allerror = 0;
	ufs2_daddr_t nb, nlbn, last;
	ufs2_daddr_t blkcount, factor, blocksreleased = 0;
	ufs1_daddr_t *bap1 = NULL;
	ufs2_daddr_t *bap2 = NULL;
#define	BAP(ip, i)	(I_IS_UFS1(ip) ? bap1[i] : bap2[i])

	fs = ITOFS(ip);
	ump = ITOUMP(ip);

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = lbn_offset(fs, level);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update the on-disk copy first.
	 * Since the double (triple) indirect blocks are freed before the
	 * single (double) indirect blocks, calls to VOP_BMAP() on these
	 * blocks will fail.  However, we already have the on-disk address,
	 * so we just pass it to bread() instead of having bread() attempt
	 * to calculate it using VOP_BMAP().
	 */
	vp = ITOV(ip);
	error = ffs_breadz(ump, vp, lbn, dbn, (int)fs->fs_bsize, NULL, NULL, 0,
	    NOCRED, 0, NULL, &bp);
	if (error) {
		*countp = 0;
		return (error);
	}

	if (I_IS_UFS1(ip))
		bap1 = (ufs1_daddr_t *)bp->b_data;
	else
		bap2 = (ufs2_daddr_t *)bp->b_data;
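	/*
	 * If only part of this block's pointers are being freed, keep a
	 * private copy of the old pointers to walk below while the
	 * zeroed on-disk copy is written out.
	 */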
	if (lastbn != -1) {
		copy = malloc(fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bp->b_data, copy, (u_int)fs->fs_bsize);
		for (i = last + 1; i < NINDIR(fs); i++)
			if (I_IS_UFS1(ip))
				bap1[i] = 0;
			else
				bap2[i] = 0;
		if (DOINGASYNC(vp)) {
			bdwrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		if (I_IS_UFS1(ip))
			bap1 = (ufs1_daddr_t *)copy;
		else
			bap2 = (ufs2_daddr_t *)copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	key = ffs_blkrelease_start(ump, ITODEVVP(ip), ip->i_number);
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = BAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs2_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ump, fs, ITODEVVP(ip), nb, fs->fs_bsize,
		    ip->i_number, vp->v_type, NULL, key);
		blocksreleased += nblocks;
	}
	ffs_blkrelease_finish(ump, key);

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = BAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		free(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}

int
ffs_rdonly(struct inode *ip)
{

	return (ITOFS(ip)->fs_ronly != 0);
}