/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 * $Id: ffs_inode.c,v 1.53 1999/01/28 00:57:54 dillon Exp $
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t,
	    ufs_daddr_t, int, long *));

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
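 * The timestamp updates themselves are applied by the ufs_itimes() call
 * at the top of the routine.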
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	register struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	fs = ip->i_fs;
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	*((struct dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	if (waitfor && (vp->v_mount->mnt_flag & MNT_ASYNC) == 0) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred, p)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	register struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	register struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	if (oip->i_size == length)
		return (0);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen || oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 0));
	}
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
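			 * The synchronous VOP_FSYNC() below does exactly
			 * that.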
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
			    p)) != 0)
				return (error);
		} else {
#ifdef QUOTA
			(void) chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, cred, p, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1,
		    cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else if (ovp->v_mount->mnt_flag & MNT_ASYNC)
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else if (ovp->v_mount->mnt_flag & MNT_ASYNC)
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep. Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list. lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = UFS_UPDATE(ovp, ((length > 0) ? 0 : 1));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
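	 * The old pointers are copied back into i_db[]/i_ib[] so that the
	 * freeing code below can still find the blocks being released.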
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	error = vtruncbuf(ovp, cred, p, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 &&
	    (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
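 *
 * The number of disk blocks released is returned through countp.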
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	register struct inode *ip;
	ufs_daddr_t lbn, lastbn;
	ufs_daddr_t dbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy = NULL, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp->b_vp, bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
		bzero((caddr_t)&bap[last + 1],
		    (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
		if ((vp->v_mount->mnt_flag & MNT_ASYNC) == 0) {
			error = bwrite(bp);
			if (error)
				allerror = error;
		} else {
			bawrite(bp);
		}
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}