/*-
 * modified for Lites 1.1
 *
 * Aug 1995, Godmar Back (gback@cs.utah.edu)
 * University of Utah, Department of Computer Science
 */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
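
/*
 * Inode update, truncation (both block-mapped and extent-mapped),
 * inactivation, and reclaim routines for ext2fs.
 */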

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2_mount.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/ext2_extern.h>
#include <fs/ext2fs/ext2_extattr.h>

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ASSERT_VOP_ELOCKED(vp, "ext2_update");
	ext2_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_e2fs;
	if (fs->e2fs_ronly)
		return (0);
	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	error = ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)));
	if (error) {
		brelse(bp);
		return (error);
	}
	if (waitfor && !DOINGASYNC(vp))
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
    daddr_t lastbn, int level, e4fs_daddr_t *countp)
{
	struct buf *bp;
	struct m_ext2fs *fs = ip->i_e2fs;
	struct vnode *vp;
	e2fs_daddr_t *bap, *copy;
	int i, nblocks, error = 0, allerror = 0;
	e2fs_lbn_t nb, nlbn, last;
	e4fs_daddr_t blkcount, factor, blocksreleased = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update the on-disk copy first.  Since
	 * the double (triple) indirect block is freed before the single
	 * (double) indirect block, calls to bmap on these blocks will fail.
	 * However, we already have the on-disk address, so we have to set
	 * the b_blkno field explicitly instead of letting bread do
	 * everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
		bp->b_iocmd = BIO_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ext2_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = (e2fs_daddr_t *)bp->b_data;
	copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	    (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	if (DOINGASYNC(vp)) {
		bdwrite(bp);
	} else {
		error = bwrite(bp);
		if (error)
			allerror = error;
	}
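	/*
	 * From here on, walk the saved copy of the block pointers; the
	 * zeroed original has already been written (or queued as a
	 * delayed write) above.
	 */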
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = le32toh(bap[i]);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ext2_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (int32_t)-1, level - 1,
			    &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ext2_blkfree(ip, nb, fs->e2fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = le32toh(bap[i]);
		if (nb != 0) {
			if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	free(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}

/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
static int
ext2_ind_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct vnode *ovp = vp;
	e4fs_daddr_t lastblock;
	struct inode *oip;
	e4fs_daddr_t bn, lbn, lastiblock[EXT2_NIADDR], indir_lbn[EXT2_NIADDR];
	uint32_t oldblks[EXT2_NDADDR + EXT2_NIADDR];
#ifdef INVARIANTS
	uint32_t newblks[EXT2_NDADDR + EXT2_NIADDR];
#endif
	struct m_ext2fs *fs;
	struct buf *bp;
	int offset, size, level;
	e4fs_daddr_t count, nblocks, blocksreleased = 0;
	int error, i, allerror;
	off_t osize;
#ifdef INVARIANTS
	struct bufobj *bo;
#endif

	oip = VTOI(ovp);
#ifdef INVARIANTS
	bo = &ovp->v_bufobj;
#endif

	fs = oip->i_e2fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize)
			return (EFBIG);
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}
	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	/* I don't understand the comment above */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - EXT2_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[EXT2_NDADDR + level] = oip->i_ib[level];
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < EXT2_NDADDR; i++) {
		oldblks[i] = oip->i_db[i];
		if (i > lastblock)
			oip->i_db[i] = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ext2_update(ovp, !DOINGASYNC(ovp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < EXT2_NDADDR; i++) {
#ifdef INVARIANTS
		newblks[i] = oip->i_db[i];
#endif
		oip->i_db[i] = oldblks[i];
	}
	for (i = 0; i < EXT2_NIADDR; i++) {
#ifdef INVARIANTS
		newblks[EXT2_NDADDR + i] = oip->i_ib[i];
#endif
		oip->i_ib[i] = oldblks[EXT2_NDADDR + i];
	}
	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize);
	if (error && (allerror == 0))
		allerror = error;
	vnode_pager_setsize(ovp, length);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -EXT2_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->e2fs_fsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = EXT2_NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ext2_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[EXT2_NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < EXT2_NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	BO_LOCK(bo);
	if (length == 0 && (bo->bo_dirty.bv_cnt != 0 ||
	    bo->bo_clean.bv_cnt != 0))
		panic("itrunc3");
	BO_UNLOCK(bo);
#endif	/* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	if (oip->i_blocks >= blocksreleased)
		oip->i_blocks -= blocksreleased;
	else	/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
	return (allerror);
}
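
/*
 * Truncate an extent-mapped inode to at most length size, freeing the
 * disk blocks.  The extent tree itself is pruned by
 * ext4_ext_remove_space(); the handling of a partial last block mirrors
 * ext2_ind_truncate() above.
 */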
static int
ext2_ext_truncate(struct vnode *vp, off_t length, int flags,
    struct ucred *cred, struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct m_ext2fs *fs;
	struct inode *oip;
	struct buf *bp;
	uint32_t lbn, offset;
	int error, size;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_e2fs;
	osize = oip->i_size;

	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize) {
			return (EFBIG);
		}
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}

	lastblock = (length + fs->e2fs_bsize - 1) / fs->e2fs_bsize;
	error = ext4_ext_remove_space(oip, lastblock, flags, cred, td);
	if (error)
		return (error);

	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}

	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize);
	if (error)
		return (error);

	vnode_pager_setsize(ovp, length);

	oip->i_size = length;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ext2_update(ovp, !DOINGASYNC(ovp));

	return (error);
}

/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	int error;

	ASSERT_VOP_LOCKED(vp, "ext2_truncate");

	if (length < 0)
		return (EINVAL);

	ip = VTOI(vp);
	if (vp->v_type == VLNK &&
	    ip->i_size < VFSTOEXT2(vp->v_mount)->um_e2fs->e2fs_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&ip->i_shortlink, (u_int)ip->i_size);
		ip->i_size = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 1));
	}
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 0));
	}

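	/*
	 * Extent-mapped inodes are trimmed via the extent tree in
	 * ext2_ext_truncate(); classic block-mapped inodes go through
	 * the indirect-block path in ext2_ind_truncate().
	 */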
	if (ip->i_flag & IN_E4EXTENTS)
		error = ext2_ext_truncate(vp, length, flags, cred, td);
	else
		error = ext2_ind_truncate(vp, length, flags, cred, td);
	cluster_init_vn(&ip->i_clusterw);

	return (error);
}

/*
 * Last reference to an inode.  If the inode is unlinked, free its blocks
 * and extended attributes and release it; otherwise flush any pending
 * timestamp and flag updates.
 */
int
ext2_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct thread *td = curthread;
	int mode, error = 0;

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0) {
		ext2_extattr_free(ip);
		error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td);
		ip->i_rdev = 0;
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2_vfree(vp, ip->i_number, mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE))
		ext2_update(vp, 0);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip->i_mode == 0)
		vrecycle(vp);
	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ext2_reclaim(struct vop_reclaim_args *ap)
{
	struct inode *ip;
	struct vnode *vp = ap->a_vp;

	ip = VTOI(vp);
	if (ip->i_flag & IN_LAZYMOD) {
		ip->i_flag |= IN_MODIFIED;
		ext2_update(vp, 0);
	}
	vfs_hash_remove(vp);
	free(vp->v_data, M_EXT2NODE);
	vp->v_data = 0;
	return (0);
}