/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 * $FreeBSD$
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t,
	    ufs_daddr_t, int, long *));

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
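 *
 * Note that a waitfor request is honored only when the file system is not
 * mounted with the async option (the DOINGASYNC check below), and that on
 * a read-only mount the disk write is skipped entirely once the in-core
 * flags have been cleared.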
 */
int
ffs_update(vp, waitfor)
	struct vnode *vp;
	int waitfor;
{
	register struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	fs = ip->i_fs;
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	*((struct dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vp, length, flags, cred, p)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	register struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	register struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	if (oip->i_size == length)
		return (0);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 0));
	}
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
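			 * A full truncation to zero length is handled by
			 * the softdep_setup_freeblocks() path below instead.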
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
			    p)) != 0)
				return (error);
		} else {
#ifdef QUOTA
			(void) chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, cred, p, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1,
		    cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = UFS_UPDATE(ovp, ((length > 0) ? 0 : 1));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
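	 * (The DIAGNOSTIC code at the end of this function compares the
	 * saved newblks array against the in-core block pointers.)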
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	error = vtruncbuf(ovp, cred, p, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 &&
	    (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
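 *
 * As an illustration of the index arithmetic below: with 8K blocks an
 * indirect block holds NINDIR(fs) == 8192 / sizeof(ufs_daddr_t) == 2048
 * pointers, so at the DOUBLE level factor == 2048 and last = lastbn / factor
 * selects the one entry that still maps part of the file; entries beyond it
 * are freed whole and that entry is itself truncated recursively.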
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	register struct inode *ip;
	ufs_daddr_t lbn, lastbn;
	ufs_daddr_t dbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy = NULL, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double (triple) indirect blocks are processed before single
	 * (double) indirect blocks, calls to bmap on these blocks will
	 * fail.  However, we already have the on disk address, so we have
	 * to set the b_blkno field explicitly instead of letting bread do
	 * everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		BUF_STRATEGY(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
		bzero((caddr_t)&bap[last + 1],
		    (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}