1 /*- 2 * modified for Lites 1.1 3 * 4 * Aug 1995, Godmar Back (gback@cs.utah.edu) 5 * University of Utah, Department of Computer Science 6 */ 7 /*- 8 * SPDX-License-Identifier: BSD-3-Clause 9 * 10 * Copyright (c) 1982, 1986, 1989, 1993 11 * The Regents of the University of California. All rights reserved. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * @(#)ffs_inode.c 8.5 (Berkeley) 12/30/93 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/mount.h> 43 #include <sys/bio.h> 44 #include <sys/buf.h> 45 #include <sys/endian.h> 46 #include <sys/vnode.h> 47 #include <sys/malloc.h> 48 #include <sys/rwlock.h> 49 #include <sys/sdt.h> 50 51 #include <vm/vm.h> 52 #include <vm/vm_extern.h> 53 54 #include <fs/ext2fs/fs.h> 55 #include <fs/ext2fs/inode.h> 56 #include <fs/ext2fs/ext2_mount.h> 57 #include <fs/ext2fs/ext2fs.h> 58 #include <fs/ext2fs/fs.h> 59 #include <fs/ext2fs/ext2_extern.h> 60 #include <fs/ext2fs/ext2_extattr.h> 61 62 /* 63 * Update the access, modified, and inode change times as specified by the 64 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. Write the inode 65 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by 66 * the timestamp update). The IN_LAZYMOD flag is set to force a write 67 * later if not now. If we write now, then clear both IN_MODIFIED and 68 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is 69 * set, then wait for the write to complete. 
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ASSERT_VOP_ELOCKED(vp, "ext2_update");
	ext2_itimes(vp);
	ip = VTOI(vp);
	/* Nothing to do unless the inode is dirty or a sync write was asked. */
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_e2fs;
	if (fs->e2fs_ronly)
		return (0);
	/* Read the cylinder-group block that holds this inode's on-disk copy. */
	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	/* Copy the in-memory inode into its slot within the buffer. */
	error = ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)));
	if (error) {
		brelse(bp);
		return (error);
	}
	/* Synchronous write only when requested and the mount is not async. */
	if (waitfor && !DOINGASYNC(vp))
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * On return, *countp holds the number of device (DEV_BSIZE) blocks
 * released; on error it is 0 only for the initial read failure.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
    daddr_t lastbn, int level, e4fs_daddr_t *countp)
{
	struct buf *bp;
	struct m_ext2fs *fs = ip->i_e2fs;
	struct vnode *vp;
	e2fs_daddr_t *bap, *copy;
	int i, nblocks, error = 0, allerror = 0;
	e2fs_lbn_t nb, nlbn, last;
	e4fs_daddr_t blkcount, factor, blocksreleased = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
		/* Buffer not valid in cache: issue the read ourselves. */
		bp->b_iocmd = BIO_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ext2_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = (e2fs_daddr_t *)bp->b_data;
	/*
	 * Snapshot the pointer array before zeroing, so the freed entries
	 * can still be walked below after the trimmed block is written.
	 */
	copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	    (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;	/* whole block freed; don't keep it */
	if (DOINGASYNC(vp)) {
		bdwrite(bp);
	} else {
		error = bwrite(bp);
		if (error)
			allerror = error;
	}
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = le32toh(bap[i]);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			/* lastbn of -1 means free the entire child block. */
			if ((error = ext2_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ext2_blkfree(ip, nb, fs->e2fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = le32toh(bap[i]);
		if (nb != 0) {
			if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	free(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}

/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.  Used for inodes with classic (indirect-block)
 * block maps; extent-mapped inodes go through ext2_ext_truncate().
 */
static int
ext2_ind_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct vnode *ovp = vp;
	e4fs_daddr_t lastblock;
	struct inode *oip;
	e4fs_daddr_t bn, lbn, lastiblock[EXT2_NIADDR], indir_lbn[EXT2_NIADDR];
	uint32_t oldblks[EXT2_NDADDR + EXT2_NIADDR];
#ifdef INVARIANTS
	uint32_t newblks[EXT2_NDADDR + EXT2_NIADDR];
#endif
	struct m_ext2fs *fs;
	struct buf *bp;
	int offset, size, level;
	e4fs_daddr_t count, nblocks, blocksreleased = 0;
	int error, i, allerror;
	off_t osize;
#ifdef INVARIANTS
	struct bufobj *bo;
#endif

	oip = VTOI(ovp);
#ifdef INVARIANTS
	bo = &ovp->v_bufobj;
#endif

	fs = oip->i_e2fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize)
			return (EFBIG);
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			/* Allocation failed: roll the pager size back. */
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}
	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		/* Zero the tail of the new last block past the new EOF. */
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - EXT2_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[EXT2_NDADDR + level] = oip->i_ib[level];
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < EXT2_NDADDR; i++) {
		oldblks[i] = oip->i_db[i];
		if (i > lastblock)
			oip->i_db[i] = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ext2_update(ovp, !DOINGASYNC(ovp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < EXT2_NDADDR; i++) {
#ifdef INVARIANTS
		newblks[i] = oip->i_db[i];
#endif
		oip->i_db[i] = oldblks[i];
	}
	for (i = 0; i < EXT2_NIADDR; i++) {
#ifdef INVARIANTS
		newblks[EXT2_NDADDR + i] = oip->i_ib[i];
#endif
		oip->i_ib[i] = oldblks[EXT2_NDADDR + i];
	}
	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize);
	if (error && (allerror == 0))
		allerror = error;
	vnode_pager_setsize(ovp, length);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -EXT2_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				/*
				 * NOTE(review): frees with e2fs_fsize here
				 * while ext2_indirtrunc uses e2fs_bsize;
				 * presumably equivalent since ext2 has no
				 * fragments — confirm.
				 */
				ext2_blkfree(oip, bn, fs->e2fs_fsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = EXT2_NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ext2_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	/* The freeing above must not have changed the saved block map. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[EXT2_NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < EXT2_NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	BO_LOCK(bo);
	if (length == 0 && (bo->bo_dirty.bv_cnt != 0 ||
	    bo->bo_clean.bv_cnt != 0))
		panic("itrunc3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	if (oip->i_blocks >= blocksreleased)
		oip->i_blocks -= blocksreleased;
	else	/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
	return (allerror);
}

/*
 * Truncate an extent-mapped (IN_E4EXTENTS) inode to at most length
 * size, releasing the extent blocks via ext4_ext_remove_space().
 */
static int
ext2_ext_truncate(struct vnode *vp, off_t length, int flags,
    struct ucred *cred, struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct m_ext2fs *fs;
	struct inode *oip;
	struct buf *bp;
	uint32_t lbn, offset;
	int error, size;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_e2fs;
	osize = oip->i_size;

	/* Growing: allocate the block containing the new last byte. */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize) {
			return (EFBIG);
		}
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}

	/* Shrinking: drop all extents past the new last block. */
	lastblock = (length + fs->e2fs_bsize - 1) / fs->e2fs_bsize;
	error = ext4_ext_remove_space(oip, lastblock, flags, cred, td);
	if (error)
		return (error);

	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		/* Zero the tail of the partial last block past new EOF. */
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}

	/* Restore old size while invalidating buffers past the new EOF. */
	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize);
	if (error)
		return (error);

	vnode_pager_setsize(ovp, length);

	oip->i_size = length;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ext2_update(ovp, !DOINGASYNC(ovp));

	return (error);
}

/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	int error;

	ASSERT_VOP_LOCKED(vp, "ext2_truncate");

	if (length < 0)
		return (EINVAL);

	ip = VTOI(vp);
	/*
	 * Short symlinks store their target inside the inode itself
	 * (i_shortlink); just zero it instead of freeing blocks.
	 */
	if (vp->v_type == VLNK &&
	    ip->i_size < VFSTOEXT2(vp->v_mount)->um_e2fs->e2fs_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&ip->i_shortlink, (u_int)ip->i_size);
		ip->i_size = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 1));
	}
	/* Size unchanged: only the timestamps need updating. */
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 0));
	}

	/* Dispatch on the inode's block-mapping scheme. */
	if (ip->i_flag & IN_E4EXTENTS)
		error = ext2_ext_truncate(vp, length, flags, cred, td);
	else
		error = ext2_ind_truncate(vp, length, flags, cred, td);
	/* Truncation invalidates any write-clustering state. */
	cluster_init_vn(&ip->i_clusterw);

	return (error);
}

/*
 * Last reference to an inode is being dropped: free its storage if it
 * is unlinked, flush any pending updates, and recycle it when unused.
 * (Historically: "discard preallocated blocks".)
 */
int
ext2_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct thread *td = curthread;
	int mode, error = 0;

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0) {
		/* Unlinked and unreferenced: release its data and itself. */
		ext2_extattr_free(ip);
		error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td);
		ip->i_rdev = 0;
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2_vfree(vp, ip->i_number, mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE))
		ext2_update(vp, 0);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip->i_mode == 0)
		vrecycle(vp);
	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ext2_reclaim(struct vop_reclaim_args *ap)
{
	struct inode *ip;
	struct vnode *vp = ap->a_vp;

	ip = VTOI(vp);
	/* Force out any lazily-deferred timestamp update before teardown. */
	if (ip->i_flag & IN_LAZYMOD) {
		ip->i_flag |= IN_MODIFIED;
		ext2_update(vp, 0);
	}
	vfs_hash_remove(vp);
	free(vp->v_data, M_EXT2NODE);
	vp->v_data = 0;
	return (0);
}