1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * @(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 * $FreeBSD$
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc(struct inode *, ufs2_daddr_t, ufs2_daddr_t,
	    ufs2_daddr_t, int, ufs2_daddr_t *);

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 *
 * Returns 0 on success (or when no write is needed) or an errno from
 * bread()/bwrite().
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	/* Fold IN_ACCESS/IN_UPDATE/IN_CHANGE into the timestamps first. */
	ufs_itimes(vp);
	ip = VTOI(vp);
	/* Nothing dirty and caller is not forcing a synchronous write. */
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	/* On a read-only filesystem the on-disk copy cannot be updated. */
	if (fs->fs_ronly)
		return (0);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC &&		/* XXX */
	    fs->fs_old_inodefmt < FS_44INODEFMT) {	/* XXX */
		ip->i_din1->di_ouid = ip->i_uid;	/* XXX */
		ip->i_din1->di_ogid = ip->i_gid;	/* XXX */
	}						/* XXX */
	/* Read the filesystem block that holds this inode's dinode. */
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	/* Copy the in-core dinode into its slot in the inode block. */
	if (ip->i_ump->um_fstype == UFS1)
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
	else
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
	if (waitfor && !DOINGASYNC(vp)) {
		/* Caller asked for a synchronous update. */
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		/* Memory or dirty-buffer pressure: push the write out now. */
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		/* Otherwise a delayed write is sufficient. */
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred, td)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *ovp = vp;
	struct inode *oip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs2_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	ufs2_daddr_t count, blocksreleased = 0;
	struct fs *fs;
	struct buf *bp;
	int offset, size, level, nblocks;
	int i, aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Short symlinks store their target in the inode itself (see
	 * SHORTLINK); they have no disk blocks to free, so just clear
	 * the stored pathname.
	 */
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     DIP(oip, i_blocks) == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(oip), (u_int)oip->i_size);
		oip->i_size = 0;
		DIP(oip, i_size) = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/* Size unchanged: only the timestamps need updating. */
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	if ((oip->i_flags & SF_SNAPSHOT) != 0)
		ffs_snapremove(ovp);
	/* Reset the vnode's cluster-write bookkeeping. */
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
			    td)) != 0)
				return (error);
			if (oip->i_flag & IN_SPACECOUNTED)
				fs->fs_pendingblocks -= DIP(oip, i_blocks);
		} else {
			/*
			 * Truncation to zero with soft updates: hand the
			 * whole job to the softdep code, which frees the
			 * blocks in the background.
			 */
#ifdef QUOTA
			(void) chkdq(oip, -DIP(oip, i_blocks), NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, cred, td, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		aflags = BA_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= BA_SYNC;
		/* Allocate (and zero) the block holding the new last byte. */
		error = UFS_BALLOC(ovp, length - 1, 1,
		    cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		DIP(oip, i_size) = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & BA_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
		DIP(oip, i_size) = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = BA_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= BA_SYNC;
		error = UFS_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			return (error);
		}
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
			return (error);
		oip->i_size = length;
		DIP(oip, i_size) = length;
		size = blksize(fs, oip, lbn);
		/* Zero the tail of the partial block (not for directories). */
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & BA_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep. Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks. If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list. lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = DIP(oip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP(oip, i_ib[level]) = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = DIP(oip, i_db[i]);
		if (i > lastblock)
			DIP(oip, i_db[i]) = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	/* Synchronously write the truncated inode before freeing blocks. */
	allerror = UFS_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = DIP(oip, i_db[i]);
		DIP(oip, i_db[i]) = oldblks[i];
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = DIP(oip, i_ib[i]);
		DIP(oip, i_ib[i]) = oldblks[NDADDR + i];
	}
	oip->i_size = osize;
	DIP(oip, i_size) = osize;

	/* Throw away buffers beyond the new end of file. */
	error = vtruncbuf(ovp, cred, td, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(oip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				/* Entire indirect tree at this level goes. */
				DIP(oip, i_ib[level]) = 0;
				ffs_blkfree(fs, oip->i_devvp, bn, fs->fs_bsize,
				    oip->i_number);
				blocksreleased += nblocks;
			}
		}
		/* Part of this level survives; lower levels survive too. */
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(oip, i_db[i]);
		if (bn == 0)
			continue;
		DIP(oip, i_db[i]) = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(oip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		DIP(oip, i_size) = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(fs, oip->i_devvp, bn, oldspace - newspace,
			    oip->i_number);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	/* The block pointers must have converged to the saved new state. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != DIP(oip, i_ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != DIP(oip, i_db[i]))
			panic("ffs_truncate2");
	if (length == 0 &&
	    (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
 */
	oip->i_size = length;
	DIP(oip, i_size) = length;
	DIP(oip, i_blocks) -= blocksreleased;

	if (DIP(oip, i_blocks) < 0)			/* sanity */
		DIP(oip, i_blocks) = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	/* Credit the released blocks back against the owner's quota. */
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * lbn is the (negative) logical block number of this indirect block, dbn
 * its device block address, and level one of SINGLE/DOUBLE/TRIPLE.  The
 * number of device blocks released is returned through countp.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	ufs2_daddr_t lbn, lastbn;
	ufs2_daddr_t dbn;
	int level;
	ufs2_daddr_t *countp;
{
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	struct vnode *vp;
	caddr_t copy = NULL;
	int i, nblocks, error = 0, allerror = 0;
	ufs2_daddr_t nb, nlbn, last;
	ufs2_daddr_t blkcount, factor, blocksreleased = 0;
	ufs1_daddr_t *bap1 = NULL;
	ufs2_daddr_t *bap2 = NULL;
	/* BAP selects the UFS1 or UFS2 view of the block-pointer array. */
# define BAP(ip, i) (((ip)->i_ump->um_fstype == UFS1) ? bap1[i] : bap2[i])

	/*
	 * Calculate index in current block of last
	 * block to be kept. -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first. Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail. However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		/* Hand-set the device address; bmap cannot resolve it. */
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		BUF_STRATEGY(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	if (ip->i_ump->um_fstype == UFS1)
		bap1 = (ufs1_daddr_t *)bp->b_data;
	else
		bap2 = (ufs2_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		/*
		 * Partial truncation: zero the freed pointers in the
		 * on-disk copy first, then work from a private copy of
		 * the old pointers while the buffer is written out.
		 */
		MALLOC(copy, caddr_t, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bp->b_data, copy, (u_int)fs->fs_bsize);
		for (i = last + 1; i < NINDIR(fs); i++)
			BAP(ip, i) = 0;
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		if (ip->i_ump->um_fstype == UFS1)
			bap1 = (ufs1_daddr_t *)copy;
		else
			bap2 = (ufs2_daddr_t *)copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = BAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			/* lastbn of -1 frees the whole lower-level tree. */
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs2_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(fs, ip->i_devvp, nb, fs->fs_bsize, ip->i_number);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = BAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		/* Fully freed: the cached indirect block is now stale. */
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}