/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 * $FreeBSD$
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc(struct inode *, ufs_daddr_t, ufs_daddr_t,
	    ufs_daddr_t, int, long *);

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly)
		return (0);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	*((struct dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred, td)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	if ((oip->i_flags & SF_SNAPSHOT) != 0)
		ffs_snapremove(ovp);
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work.
			 * Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
			    td)) != 0)
				return (error);
			if (oip->i_flag & IN_SPACECOUNTED)
				fs->fs_pendingblocks -= oip->i_blocks;
		} else {
#ifdef QUOTA
			(void) chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, cred, td, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = UFS_BALLOC(ovp, length - 1, 1,
		    cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = UFS_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			return (error);
		}
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = VOP_FSYNC(ovp, cred, MNT_WAIT, td)) != 0)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.
	 * lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = UFS_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	error = vtruncbuf(ovp, cred, td, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(fs, oip->i_devvp, bn, fs->fs_bsize,
				    oip->i_number);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(fs, oip->i_devvp, bn, oldspace - newspace,
			    oip->i_number);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 &&
	    (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
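	 * (i_size was restored to osize above, together with the old
	 * block pointers, so that the freeing loops would compute block
	 * sizes from the pre-truncation state; it may still hold that
	 * value here.)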
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	ufs_daddr_t lbn, lastbn;
	ufs_daddr_t dbn;
	int level;
	long *countp;
{
	int i;
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy = NULL, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double (triple) indirect blocks are processed before single
	 * (double) indirect blocks, calls to bmap on these blocks will
	 * fail.  However, we already have the on disk address, so we
	 * have to set the b_blkno field explicitly instead of letting
	 * bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		BUF_STRATEGY(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
		bzero((caddr_t)&bap[last + 1],
		    (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(fs, ip->i_devvp, nb, fs->fs_bsize, ip->i_number);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
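	 * The loop above terminates with i == last and nlbn holding the
	 * logical block number for bap[last], the indirect block that
	 * straddles the truncation point; recurse into it using
	 * lastbn % factor as the new cutoff.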
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}