/*-
 * modified for Lites 1.1
 *
 * Aug 1995, Godmar Back (gback@cs.utah.edu)
 * University of Utah, Department of Computer Science
 */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.5 (Berkeley) 12/30/93
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2_mount.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/ext2_extern.h>
#include <fs/ext2fs/ext2_extattr.h>

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
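 *
 * A typical caller marks the inode dirty and then writes it out; a sketch
 * of the pattern used by ext2_truncate() and ext2_inactive() below:
 *
 *	ip->i_flag |= IN_CHANGE | IN_UPDATE;
 *	error = ext2_update(vp, waitfor);
 *
 * where a nonzero waitfor requests a synchronous write.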
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ASSERT_VOP_ELOCKED(vp, "ext2_update");
	ext2_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_e2fs;
	if (fs->e2fs_ronly)
		return (0);
	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	error = ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)));
	if (error) {
		brelse(bp);
		return (error);
	}
	if (waitfor && !DOINGASYNC(vp))
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
    daddr_t lastbn, int level, e4fs_daddr_t *countp)
{
	struct buf *bp;
	struct m_ext2fs *fs = ip->i_e2fs;
	struct vnode *vp;
	e2fs_daddr_t *bap, *copy;
	int i, nblocks, error = 0, allerror = 0;
	e2fs_lbn_t nb, nlbn, last;
	e4fs_daddr_t blkcount, factor, blocksreleased = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double (triple) indirect blocks are processed before single
	 * (double) indirect blocks, calls to bmap on these blocks will
	 * fail.  However, we already have the on disk address, so we have
	 * to set the b_blkno field explicitly instead of letting bread do
	 * everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
		bp->b_iocmd = BIO_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ext2_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = (e2fs_daddr_t *)bp->b_data;
	copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	    (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	if (DOINGASYNC(vp)) {
		bdwrite(bp);
	} else {
		error = bwrite(bp);
		if (error)
			allerror = error;
	}
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
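	 * Work from the highest slot of the saved copy down to (but not
	 * including) `last'; nlbn tracks the logical block number covered
	 * by slot i.  For levels above SINGLE each nonzero entry is itself
	 * an indirect block, so it is truncated recursively with a lastbn
	 * of -1 (free the whole subtree) before the block itself is
	 * released with ext2_blkfree().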
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = le32toh(bap[i]);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ext2_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ext2_blkfree(ip, nb, fs->e2fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = le32toh(bap[i]);
		if (nb != 0) {
			if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	free(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}

/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
static int
ext2_ind_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct vnode *ovp = vp;
	e4fs_daddr_t lastblock;
	struct inode *oip;
	e4fs_daddr_t bn, lbn, lastiblock[EXT2_NIADDR], indir_lbn[EXT2_NIADDR];
	uint32_t oldblks[EXT2_NDADDR + EXT2_NIADDR];
	uint32_t newblks[EXT2_NDADDR + EXT2_NIADDR];
	struct m_ext2fs *fs;
	struct buf *bp;
	int offset, size, level;
	e4fs_daddr_t count, nblocks, blocksreleased = 0;
	int error, i, allerror;
	off_t osize;
#ifdef INVARIANTS
	struct bufobj *bo;
#endif

	oip = VTOI(ovp);
#ifdef INVARIANTS
	bo = &ovp->v_bufobj;
#endif

	fs = oip->i_e2fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize)
			return (EFBIG);
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}
	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
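	 * For example (assuming a 4096-byte block size), truncating to
	 * length 6000 leaves blkoff() == 1904, so the block containing the
	 * new EOF is fetched with ext2_balloc() and bytes 1904..4095 of it
	 * are cleared below before the block pointers are trimmed.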
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - EXT2_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[EXT2_NDADDR + level] = oip->i_ib[level];
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < EXT2_NDADDR; i++) {
		oldblks[i] = oip->i_db[i];
		if (i > lastblock)
			oip->i_db[i] = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ext2_update(ovp, !DOINGASYNC(ovp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < EXT2_NDADDR; i++) {
		newblks[i] = oip->i_db[i];
		oip->i_db[i] = oldblks[i];
	}
	for (i = 0; i < EXT2_NIADDR; i++) {
		newblks[EXT2_NDADDR + i] = oip->i_ib[i];
		oip->i_ib[i] = oldblks[EXT2_NDADDR + i];
	}
	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize);
	if (error && (allerror == 0))
		allerror = error;
	vnode_pager_setsize(ovp, length);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -EXT2_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->e2fs_fsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
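	 * Everything past `lastblock' in the direct block array is released
	 * here: clear the in-core pointer, free the block, and credit its
	 * size to blocksreleased so i_blocks can be adjusted at the end.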
	 */
	for (i = EXT2_NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ext2_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[EXT2_NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < EXT2_NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	BO_LOCK(bo);
	if (length == 0 && (bo->bo_dirty.bv_cnt != 0 ||
	    bo->bo_clean.bv_cnt != 0))
		panic("itrunc3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	if (oip->i_blocks >= blocksreleased)
		oip->i_blocks -= blocksreleased;
	else	/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
	return (allerror);
}

static int
ext2_ext_truncate(struct vnode *vp, off_t length, int flags,
    struct ucred *cred, struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct m_ext2fs *fs;
	struct inode *oip;
	struct buf *bp;
	uint32_t lbn, offset;
	int error, size;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_e2fs;
	osize = oip->i_size;

	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize) {
			return (EFBIG);
		}
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}

	lastblock = (length + fs->e2fs_bsize - 1) / fs->e2fs_bsize;
	error = ext4_ext_remove_space(oip, lastblock, flags, cred, td);
	if (error)
		return (error);

	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}

	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize);
	if (error)
		return (error);

	vnode_pager_setsize(ovp, length);

	oip->i_size = length;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ext2_update(ovp, !DOINGASYNC(ovp));

	return (error);
}

/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	int error;

	ASSERT_VOP_LOCKED(vp, "ext2_truncate");

	if (length < 0)
		return (EINVAL);

	ip = VTOI(vp);
	if (vp->v_type == VLNK &&
	    ip->i_size < vp->v_mount->mnt_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&ip->i_shortlink, (u_int)ip->i_size);
		ip->i_size = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 1));
	}
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(vp, 0));
	}

	if (ip->i_flag & IN_E4EXTENTS)
		error = ext2_ext_truncate(vp, length, flags, cred, td);
	else
		error = ext2_ind_truncate(vp, length, flags, cred, td);
	cluster_init_vn(&ip->i_clusterw);

	return (error);
}

/*
 * discard preallocated blocks
 */
int
ext2_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct thread *td = curthread;
	int mode, error = 0;

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0) {
		ext2_extattr_free(ip);
		error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td);
		if (!(ip->i_flag & IN_E4EXTENTS))
			ip->i_rdev = 0;
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2_vfree(vp, ip->i_number, mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE))
		ext2_update(vp, 0);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip->i_mode == 0)
		vrecycle(vp);
	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ext2_reclaim(struct vop_reclaim_args *ap)
{
	struct inode *ip;
	struct vnode *vp = ap->a_vp;

	ip = VTOI(vp);
	if (ip->i_flag & IN_LAZYMOD) {
		ip->i_flag |= IN_MODIFIED;
		ext2_update(vp, 0);
	}
	vfs_hash_remove(vp);
	free(vp->v_data, M_EXT2NODE);
	vp->v_data = 0;
	return (0);
}