/*-
 *  modified for Lites 1.1
 *
 *  Aug 1995, Godmar Back (gback@cs.utah.edu)
 *  University of Utah, Department of Computer Science
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.5 (Berkeley) 12/30/93
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2_mount.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/ext2_extern.h>
#include <fs/ext2fs/ext2_extattr.h>

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ASSERT_VOP_ELOCKED(vp, "ext2_update");
	ext2_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_e2fs;
	if (fs->e2fs_ronly)
		return (0);
	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	error = ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)));
	if (error) {
		brelse(bp);
		return (error);
	}
	if (waitfor && !DOINGASYNC(vp))
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
    daddr_t lastbn, int level, e4fs_daddr_t *countp)
{
	struct buf *bp;
	struct m_ext2fs *fs = ip->i_e2fs;
	struct vnode *vp;
	e2fs_daddr_t *bap, *copy;
	int i, nblocks, error = 0, allerror = 0;
	e2fs_lbn_t nb, nlbn, last;
	e4fs_daddr_t blkcount, factor, blocksreleased = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->e2fs_bsize);
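	/*
	 * Worked example of the arithmetic above (assuming a 4096-byte
	 * block size, so that NINDIR(fs) == 1024 four-byte block pointers
	 * fit in an indirect block): at the DOUBLE level, factor == 1024.
	 * A lastbn of 5000 then gives last == 4, so entries 5..1023 of
	 * this block are freed outright below, entry 4 is truncated
	 * recursively at 5000 % 1024 == 904, and entries 0..3 are kept
	 * untouched.
	 */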
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double (triple) indirect blocks are freed before single (double)
	 * indirect blocks, calls to bmap on these blocks will fail.
	 * However, we already have the on disk address, so we have to set
	 * the b_blkno field explicitly instead of letting bread do
	 * everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
		bp->b_iocmd = BIO_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ext2_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = (e2fs_daddr_t *)bp->b_data;
	copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	    (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	if (DOINGASYNC(vp)) {
		bdwrite(bp);
	} else {
		error = bwrite(bp);
		if (error)
			allerror = error;
	}
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ext2_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (int32_t)-1, level - 1,
			    &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ext2_blkfree(ip, nb, fs->e2fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	free(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}

/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
static int
ext2_ind_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct inode *oip;
	int32_t bn, lbn, lastiblock[EXT2_NIADDR], indir_lbn[EXT2_NIADDR];
	uint32_t oldblks[EXT2_NDADDR + EXT2_NIADDR];
	uint32_t newblks[EXT2_NDADDR + EXT2_NIADDR];
	struct m_ext2fs *fs;
	struct buf *bp;
	int offset, size, level;
	e4fs_daddr_t count, nblocks, blocksreleased = 0;
	int error, i, allerror;
	off_t osize;
#ifdef INVARIANTS
	struct bufobj *bo;
#endif

	oip = VTOI(ovp);
#ifdef INVARIANTS
	bo = &ovp->v_bufobj;
#endif

	fs = oip->i_e2fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize)
			return (EFBIG);
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}
	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	/* I don't understand the comment above */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - EXT2_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->e2fs_bsize);
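	/*
	 * For illustration, with a 4096-byte block size (NINDIR(fs) ==
	 * 1024) and EXT2_NDADDR == 12 direct blocks: truncating to
	 * length 0 yields lastblock == -1, so every lastiblock entry is
	 * negative and all direct and indirect blocks are released.
	 * Truncating to 100000 bytes yields lastblock == 24, so all 12
	 * direct blocks are kept, lastiblock[SINGLE] == 12 is the index
	 * of the last single-indirect entry to preserve, and the double
	 * and triple indirect trees are freed entirely.
	 */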
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[EXT2_NDADDR + level] = oip->i_ib[level];
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < EXT2_NDADDR; i++) {
		oldblks[i] = oip->i_db[i];
		if (i > lastblock)
			oip->i_db[i] = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ext2_update(ovp, !DOINGASYNC(ovp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < EXT2_NDADDR; i++) {
		newblks[i] = oip->i_db[i];
		oip->i_db[i] = oldblks[i];
	}
	for (i = 0; i < EXT2_NIADDR; i++) {
		newblks[EXT2_NDADDR + i] = oip->i_ib[i];
		oip->i_ib[i] = oldblks[EXT2_NDADDR + i];
	}
	oip->i_size = osize;
	error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize);
	if (error && (allerror == 0))
		allerror = error;
	vnode_pager_setsize(ovp, length);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -EXT2_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->e2fs_fsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = EXT2_NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ext2_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[EXT2_NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < EXT2_NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	BO_LOCK(bo);
	if (length == 0 && (bo->bo_dirty.bv_cnt != 0 ||
	    bo->bo_clean.bv_cnt != 0))
		panic("itrunc3");
	BO_UNLOCK(bo);
#endif	/* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	if (oip->i_blocks >= blocksreleased)
		oip->i_blocks -= blocksreleased;
	else	/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
	return (allerror);
}

static int
ext2_ext_truncate(struct vnode *vp, off_t length, int flags,
    struct ucred *cred, struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct m_ext2fs *fs;
	struct inode *oip;
	struct buf *bp;
	uint32_t lbn, offset;
	int error, size;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_e2fs;
	osize = oip->i_size;

	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize) {
			return (EFBIG);
		}
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}

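	/*
	 * Shorten the file.  lastblock below is length rounded up to a
	 * block boundary, i.e. the first logical block lying entirely
	 * past the new end of file; ext4_ext_remove_space() is expected
	 * to release the extent mappings from that block onward before
	 * the tail of the new partial last block is zeroed.
	 */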
	lastblock = (length + fs->e2fs_bsize - 1) / fs->e2fs_bsize;
	error = ext4_ext_remove_space(oip, lastblock, flags, cred, td);
	if (error)
		return (error);

	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}

	oip->i_size = osize;
	error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize);
	if (error)
		return (error);

	vnode_pager_setsize(ovp, length);

	oip->i_size = length;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ext2_update(ovp, !DOINGASYNC(ovp));

	return (error);
}

/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	int error;

	ASSERT_VOP_LOCKED(vp, "ext2_truncate");

	if (length < 0)
		return (EINVAL);

	ip = VTOI(vp);
	if (vp->v_type == VLNK &&
	    ip->i_size < vp->v_mount->mnt_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&ip->i_shortlink, (u_int)ip->i_size);
		ip->i_size = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 1));
	}
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 0));
	}

	if (ip->i_flag & IN_E4EXTENTS)
		error = ext2_ext_truncate(vp, length, flags, cred, td);
	else
		error = ext2_ind_truncate(vp, length, flags, cred, td);

	return (error);
}

/*
 * discard preallocated blocks
 */
int
ext2_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct thread *td = ap->a_td;
	int mode, error = 0;

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0) {
		ext2_extattr_free(ip);
		error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td);
		if (!(ip->i_flag & IN_E4EXTENTS))
			ip->i_rdev = 0;
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2_vfree(vp, ip->i_number, mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE))
		ext2_update(vp, 0);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip->i_mode == 0)
		vrecycle(vp);
	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ext2_reclaim(struct vop_reclaim_args *ap)
{
	struct inode *ip;
	struct vnode *vp = ap->a_vp;

	ip = VTOI(vp);
	if (ip->i_flag & IN_LAZYMOD) {
		ip->i_flag |= IN_MODIFIED;
		ext2_update(vp, 0);
	}
	vfs_hash_remove(vp);
	free(vp->v_data, M_EXT2NODE);
	vp->v_data = 0;
	vnode_destroy_vobject(vp);
	return (0);
}