/*-
 * Copyright 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * Further information about snapshots can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_snapshot.c	8.11 (McKusick) 7/23/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/sched.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#define KERNCRED thread0.td_ucred
#define DEBUG 1

#include "opt_ffs.h"

#ifdef NO_FFS_SNAPSHOT
int
ffs_snapshot(mp, snapfile)
	struct mount *mp;
	char *snapfile;
{
	return (EINVAL);
}

int
ffs_snapblkfree(fs, devvp, bno, size, inum)
	struct fs *fs;
	struct vnode *devvp;
	ufs2_daddr_t bno;
	long size;
	ino_t inum;
{
	return (EINVAL);
}

void
ffs_snapremove(vp)
	struct vnode *vp;
{
}

void
ffs_snapshot_mount(mp)
	struct mount *mp;
{
}

void
ffs_snapshot_unmount(mp)
	struct mount *mp;
{
}

void
ffs_snapgone(ip)
	struct inode *ip;
{
}

int
ffs_copyonwrite(devvp, bp)
	struct vnode *devvp;
	struct buf *bp;
{
	return (EINVAL);
}

#else

TAILQ_HEAD(snaphead, inode);

struct snapdata {
	struct snaphead sn_head;
	daddr_t sn_listsize;
	daddr_t *sn_blklist;
	struct lock sn_lock;
};

static int cgaccount(int, struct vnode *, struct buf *, int);
static int expunge_ufs1(struct vnode *, struct inode *, struct fs *,
    int (*)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *,
    ufs_lbn_t, int), int);
static int indiracct_ufs1(struct vnode *, struct vnode *, int,
    ufs1_daddr_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, struct fs *,
    int (*)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *,
    ufs_lbn_t, int), int);
static int fullacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int snapacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int mapacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int expunge_ufs2(struct vnode *, struct inode *, struct fs *,
    int (*)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *,
    ufs_lbn_t, int), int);
static int indiracct_ufs2(struct vnode *, struct vnode *, int,
    ufs2_daddr_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, struct fs *,
    int (*)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *,
    ufs_lbn_t, int), int);
static int fullacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int snapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int mapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
    struct fs *, ufs_lbn_t, int);
static int readblock(struct vnode *vp, struct buf *, ufs2_daddr_t);
static void process_deferred_inactive(struct mount *);
static void try_free_snapdata(struct vnode *devvp, struct thread *td);
static int ffs_bp_snapblk(struct vnode *, struct buf *);

/*
 * To ensure the consistency of snapshots across crashes, we must
 * synchronously write out copied blocks before allowing the
 * originals to be modified. Because of the rather severe speed
 * penalty that this imposes, the following flag allows this
 * crash persistence to be disabled.
 */
int dopersistence = 0;

#ifdef DEBUG
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, dopersistence, CTLFLAG_RW, &dopersistence, 0, "");
static int snapdebug = 0;
SYSCTL_INT(_debug, OID_AUTO, snapdebug, CTLFLAG_RW, &snapdebug, 0, "");
int collectsnapstats = 0;
SYSCTL_INT(_debug, OID_AUTO, collectsnapstats, CTLFLAG_RW, &collectsnapstats,
    0, "");
#endif /* DEBUG */
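
/*
 * Usage sketch (editor's note, an assumption rather than part of the
 * original sources): with the DEBUG sysctls above compiled in, these
 * knobs can be inspected and toggled from userland at runtime, e.g.
 *
 *	sysctl debug.dopersistence=1	# pay the synchronous-write cost
 *	sysctl debug.snapdebug=1	# verbose snapshot tracing
 */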

/*
 * Create a snapshot file and initialize it for the filesystem.
 */
int
ffs_snapshot(mp, snapfile)
	struct mount *mp;
	char *snapfile;
{
	ufs2_daddr_t numblks, blkno, *blkp, *snapblklist;
	int error, cg, snaploc;
	int i, size, len, loc;
	int flag;
	struct timespec starttime = {0, 0}, endtime;
	char saved_nice = 0;
	long redo = 0, snaplistsize = 0;
	int32_t *lp;
	void *space;
	struct fs *copy_fs = NULL, *fs;
	struct thread *td = curthread;
	struct inode *ip, *xp;
	struct buf *bp, *nbp, *ibp, *sbp = NULL;
	struct nameidata nd;
	struct mount *wrtmp;
	struct vattr vat;
	struct vnode *vp, *xvp, *mvp, *devvp;
	struct uio auio;
	struct iovec aiov;
	struct snapdata *sn;
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	sn = NULL;
	MNT_ILOCK(mp);
	flag = mp->mnt_flag;
	MNT_IUNLOCK(mp);

	/*
	 * Need to serialize access to snapshot code per filesystem.
	 */
	/*
	 * Assign a snapshot slot in the superblock.
	 */
	UFS_LOCK(ump);
	for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++)
		if (fs->fs_snapinum[snaploc] == 0)
			break;
	UFS_UNLOCK(ump);
	if (snaploc == FSMAXSNAP)
		return (ENOSPC);
	/*
	 * Create the snapshot file.
	 */
restart:
	NDINIT(&nd, CREATE, LOCKPARENT | LOCKLEAF, UIO_SYSSPACE, snapfile, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	if (nd.ni_vp != NULL) {
		vput(nd.ni_vp);
		error = EEXIST;
	}
	if (nd.ni_dvp->v_mount != mp)
		error = EXDEV;
	if (error) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (nd.ni_dvp == nd.ni_vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		return (error);
	}
	VATTR_NULL(&vat);
	vat.va_type = VREG;
	vat.va_mode = S_IRUSR;
	vat.va_vaflags |= VA_EXCLUSIVE;
	if (VOP_GETWRITEMOUNT(nd.ni_dvp, &wrtmp))
		wrtmp = NULL;
	if (wrtmp != mp)
		panic("ffs_snapshot: mount mismatch");
	vfs_rel(wrtmp);
	if (vn_start_write(NULL, &wrtmp, V_NOWAIT) != 0) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vput(nd.ni_dvp);
		if ((error = vn_start_write(NULL, &wrtmp,
		    V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}
	VOP_LEASE(nd.ni_dvp, td, KERNCRED, LEASE_WRITE);
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vat);
	VOP_UNLOCK(nd.ni_dvp, 0);
	if (error) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vn_finished_write(wrtmp);
		vrele(nd.ni_dvp);
		return (error);
	}
	vp = nd.ni_vp;
	vp->v_vflag |= VV_SYSTEM;
	ip = VTOI(vp);
	devvp = ip->i_devvp;
	/*
	 * Allocate and copy the last block contents so as to be able
	 * to set size to that of the filesystem.
	 */
	numblks = howmany(fs->fs_size, fs->fs_frag);
	error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(numblks - 1)),
	    fs->fs_bsize, KERNCRED, BA_CLRBUF, &bp);
	if (error)
		goto out;
	ip->i_size = lblktosize(fs, (off_t)numblks);
	DIP_SET(ip, i_size, ip->i_size);
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = readblock(vp, bp, numblks - 1);
	bawrite(bp);
	if (error != 0)
		goto out;
	/*
	 * Preallocate critical data structures so that we can copy
	 * them in without further allocation after we suspend all
	 * operations on the filesystem. We would like to just release
	 * the allocated buffers without writing them since they will
	 * be filled in below once we are ready to go, but this upsets
	 * the soft update code, so we go ahead and write the new buffers.
	 *
	 * Allocate all indirect blocks and mark all of them as not
	 * needing to be copied.
	 */
	for (blkno = NDADDR; blkno < numblks; blkno += NINDIR(fs)) {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)blkno),
		    fs->fs_bsize, td->td_ucred, BA_METAONLY, &ibp);
		if (error)
			goto out;
		bawrite(ibp);
	}
	/*
	 * Allocate copies for the superblock and its summary information.
	 */
	error = UFS_BALLOC(vp, fs->fs_sblockloc, fs->fs_sbsize, KERNCRED,
	    0, &nbp);
	if (error)
		goto out;
	bawrite(nbp);
	blkno = fragstoblks(fs, fs->fs_csaddr);
	len = howmany(fs->fs_cssize, fs->fs_bsize);
	for (loc = 0; loc < len; loc++) {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(blkno + loc)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto out;
		bawrite(nbp);
	}
	/*
	 * Allocate all cylinder group blocks.
	 */
	for (cg = 0; cg < fs->fs_ncg; cg++) {
		error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto out;
		bawrite(nbp);
	}
	/*
	 * Copy all the cylinder group maps. Although the
	 * filesystem is still active, we hope that only a few
	 * cylinder groups will change between now and when we
	 * suspend operations. Thus, we will be able to quickly
	 * touch up the few cylinder groups that changed during
	 * the suspension period.
	 */
	len = howmany(fs->fs_ncg, NBBY);
	MALLOC(space, void *, len, M_DEVBUF, M_WAITOK|M_ZERO);
	UFS_LOCK(ump);
	fs->fs_active = space;
	UFS_UNLOCK(ump);
	for (cg = 0; cg < fs->fs_ncg; cg++) {
		error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto out;
		error = cgaccount(cg, vp, nbp, 1);
		bawrite(nbp);
		if (error)
			goto out;
	}
	/*
	 * Change inode to snapshot type file.
	 */
	ip->i_flags |= SF_SNAPSHOT;
	DIP_SET(ip, i_flags, ip->i_flags);
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	/*
	 * Ensure that the snapshot is completely on disk.
	 * Since we have marked it as a snapshot it is safe to
	 * unlock it as no process will be allowed to write to it.
	 */
	if ((error = ffs_syncvnode(vp, MNT_WAIT)) != 0)
		goto out;
	VOP_UNLOCK(vp, 0);
	/*
	 * All allocations are done, so we can now snapshot the system.
	 *
	 * Rescind nice scheduling while running with the filesystem suspended.
	 */
	if (td->td_proc->p_nice > 0) {
		struct proc *p;

		p = td->td_proc;
		PROC_LOCK(p);
		saved_nice = p->p_nice;
		sched_nice(p, 0);
		PROC_UNLOCK(p);
	}
	/*
	 * Suspend operation on filesystem.
	 */
	for (;;) {
		vn_finished_write(wrtmp);
		if ((error = vfs_write_suspend(vp->v_mount)) != 0) {
			vn_start_write(NULL, &wrtmp, V_WAIT);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			goto out;
		}
		if (mp->mnt_kern_flag & MNTK_SUSPENDED)
			break;
		vn_start_write(NULL, &wrtmp, V_WAIT);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (ip->i_effnlink == 0) {
		error = ENOENT;		/* Snapshot file unlinked */
		goto out1;
	}
	if (collectsnapstats)
		nanotime(&starttime);

	/* The last block might have changed. Copy it again to be sure. */
	error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(numblks - 1)),
	    fs->fs_bsize, KERNCRED, BA_CLRBUF, &bp);
	if (error != 0)
		goto out1;
	error = readblock(vp, bp, numblks - 1);
	bp->b_flags |= B_VALIDSUSPWRT;
	bawrite(bp);
	if (error != 0)
		goto out1;
	/*
	 * First, copy all the cylinder group maps that have changed.
	 */
	for (cg = 0; cg < fs->fs_ncg; cg++) {
		if ((ACTIVECGNUM(fs, cg) & ACTIVECGOFF(cg)) != 0)
			continue;
		redo++;
		error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)),
		    fs->fs_bsize, KERNCRED, 0, &nbp);
		if (error)
			goto out1;
		error = cgaccount(cg, vp, nbp, 2);
		bawrite(nbp);
		if (error)
			goto out1;
	}
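	/*
	 * Editor's note on the two-pass copy above (an explanatory
	 * assumption, not original commentary): cgaccount() sets a bit in
	 * the fs_active bitmap for every cylinder group copied in pass 1,
	 * and the block allocator is expected to clear a group's bit
	 * whenever it modifies that group. Pass 2 can therefore skip any
	 * group whose bit is still set and only re-copy the "redo" groups
	 * that changed while the first pass was running.
	 */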
	/*
	 * Grab a copy of the superblock and its summary information.
	 * We delay writing it until the suspension is released below.
	 */
	error = bread(vp, lblkno(fs, fs->fs_sblockloc), fs->fs_bsize,
	    KERNCRED, &sbp);
	if (error) {
		brelse(sbp);
		sbp = NULL;
		goto out1;
	}
	loc = blkoff(fs, fs->fs_sblockloc);
	copy_fs = (struct fs *)(sbp->b_data + loc);
	bcopy(fs, copy_fs, fs->fs_sbsize);
	if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
		copy_fs->fs_clean = 1;
	size = fs->fs_bsize < SBLOCKSIZE ? fs->fs_bsize : SBLOCKSIZE;
	if (fs->fs_sbsize < size)
		bzero(&sbp->b_data[loc + fs->fs_sbsize], size - fs->fs_sbsize);
	size = blkroundup(fs, fs->fs_cssize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	copy_fs->fs_csp = space;
	bcopy(fs->fs_csp, copy_fs->fs_csp, fs->fs_cssize);
	space = (char *)space + fs->fs_cssize;
	loc = howmany(fs->fs_cssize, fs->fs_fsize);
	i = fs->fs_frag - loc % fs->fs_frag;
	len = (i == fs->fs_frag) ? 0 : i * fs->fs_fsize;
	if (len > 0) {
		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + loc),
		    len, KERNCRED, &bp)) != 0) {
			brelse(bp);
			free(copy_fs->fs_csp, M_UFSMNT);
			bawrite(sbp);
			sbp = NULL;
			goto out1;
		}
		bcopy(bp->b_data, space, (u_int)len);
		space = (char *)space + len;
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}
	if (fs->fs_contigsumsize > 0) {
		copy_fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	/*
	 * We must check for active files that have been unlinked
	 * (e.g., with a zero link count). We have to expunge all
	 * trace of these files from the snapshot so that they are
	 * not reclaimed prematurely by fsck or unnecessarily dumped.
	 * We turn off the MNTK_SUSPENDED flag to avoid a panic from
	 * spec_strategy about writing on a suspended filesystem.
	 * Note that we skip unlinked snapshot files as they will
	 * be handled separately below.
	 *
	 * We also calculate the needed size for the snapshot list.
	 */
	snaplistsize = fs->fs_ncg + howmany(fs->fs_cssize, fs->fs_bsize) +
	    FSMAXSNAP + 1 /* superblock */ + 1 /* last block */ + 1 /* size */;
	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_SUSPENDED;
loop:
	MNT_VNODE_FOREACH(xvp, mp, mvp) {
		VI_LOCK(xvp);
		MNT_IUNLOCK(mp);
		if ((xvp->v_iflag & VI_DOOMED) ||
		    (xvp->v_usecount == 0 &&
		     (xvp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == 0) ||
		    xvp->v_type == VNON ||
		    (VTOI(xvp)->i_flags & SF_SNAPSHOT)) {
			VI_UNLOCK(xvp);
			MNT_ILOCK(mp);
			continue;
		}
		/*
		 * We can skip parent directory vnode because it must have
		 * this snapshot file in it.
		 */
		if (xvp == nd.ni_dvp) {
			VI_UNLOCK(xvp);
			MNT_ILOCK(mp);
			continue;
		}
		vholdl(xvp);
		if (vn_lock(xvp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) {
			MNT_ILOCK(mp);
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			vdrop(xvp);
			goto loop;
		}
		VI_LOCK(xvp);
		if (xvp->v_usecount == 0 &&
		    (xvp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == 0) {
			VI_UNLOCK(xvp);
			VOP_UNLOCK(xvp, 0);
			vdrop(xvp);
			MNT_ILOCK(mp);
			continue;
		}
		VI_UNLOCK(xvp);
		if (snapdebug)
			vprint("ffs_snapshot: busy vnode", xvp);
		if (VOP_GETATTR(xvp, &vat, td->td_ucred, td) == 0 &&
		    vat.va_nlink > 0) {
			VOP_UNLOCK(xvp, 0);
			vdrop(xvp);
			MNT_ILOCK(mp);
			continue;
		}
		xp = VTOI(xvp);
		if (ffs_checkfreefile(copy_fs, vp, xp->i_number)) {
			VOP_UNLOCK(xvp, 0);
			vdrop(xvp);
			MNT_ILOCK(mp);
			continue;
		}
		/*
		 * If there is a fragment, clear it here.
		 */
		blkno = 0;
		loc = howmany(xp->i_size, fs->fs_bsize) - 1;
		if (loc < NDADDR) {
			len = fragroundup(fs, blkoff(fs, xp->i_size));
			if (len != 0 && len < fs->fs_bsize) {
				ffs_blkfree(ump, copy_fs, vp,
				    DIP(xp, i_db[loc]), len, xp->i_number);
				blkno = DIP(xp, i_db[loc]);
				DIP_SET(xp, i_db[loc], 0);
			}
		}
		snaplistsize += 1;
		if (xp->i_ump->um_fstype == UFS1)
			error = expunge_ufs1(vp, xp, copy_fs, fullacct_ufs1,
			    BLK_NOCOPY);
		else
			error = expunge_ufs2(vp, xp, copy_fs, fullacct_ufs2,
			    BLK_NOCOPY);
		if (blkno)
			DIP_SET(xp, i_db[loc], blkno);
		if (!error)
			error = ffs_freefile(ump, copy_fs, vp, xp->i_number,
			    xp->i_mode);
		VOP_UNLOCK(xvp, 0);
		vdrop(xvp);
		if (error) {
			free(copy_fs->fs_csp, M_UFSMNT);
			bawrite(sbp);
			sbp = NULL;
			MNT_VNODE_FOREACH_ABORT(mp, mvp);
			goto out1;
		}
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	/*
	 * If there already exist snapshots on this filesystem, grab a
	 * reference to their shared lock. If this is the first snapshot
	 * on this filesystem, we need to allocate a lock for the snapshots
	 * to share. In either case, acquire the snapshot lock and give
	 * up our original private lock.
	 */
	VI_LOCK(devvp);
	sn = devvp->v_rdev->si_snapdata;
	if (sn != NULL) {
		xp = TAILQ_FIRST(&sn->sn_head);
		VI_UNLOCK(devvp);
		VI_LOCK(vp);
		vp->v_vnlock = &sn->sn_lock;
	} else {
		VI_UNLOCK(devvp);
		sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO);
		TAILQ_INIT(&sn->sn_head);
		lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT,
		    LK_CANRECURSE | LK_NOSHARE);
		VI_LOCK(vp);
		vp->v_vnlock = &sn->sn_lock;
		mp_fixme("si_snapdata setting is racey.");
		devvp->v_rdev->si_snapdata = sn;
		xp = NULL;
	}
	lockmgr(vp->v_vnlock, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY,
	    VI_MTX(vp));
	lockmgr(&vp->v_lock, LK_RELEASE, NULL);
	/*
	 * If this is the first snapshot on this filesystem, then we need
	 * to allocate the space for the list of preallocated snapshot blocks.
	 * This list will be refined below, but this preliminary one will
	 * keep us out of deadlock until the full one is ready.
	 */
	if (xp == NULL) {
		MALLOC(snapblklist, daddr_t *, snaplistsize * sizeof(daddr_t),
		    M_UFSMNT, M_WAITOK);
		blkp = &snapblklist[1];
		*blkp++ = lblkno(fs, fs->fs_sblockloc);
		blkno = fragstoblks(fs, fs->fs_csaddr);
		for (cg = 0; cg < fs->fs_ncg; cg++) {
			if (fragstoblks(fs, cgtod(fs, cg)) > blkno)
				break;
			*blkp++ = fragstoblks(fs, cgtod(fs, cg));
		}
		len = howmany(fs->fs_cssize, fs->fs_bsize);
		for (loc = 0; loc < len; loc++)
			*blkp++ = blkno + loc;
		for (; cg < fs->fs_ncg; cg++)
			*blkp++ = fragstoblks(fs, cgtod(fs, cg));
		snapblklist[0] = blkp - snapblklist;
		VI_LOCK(devvp);
		if (sn->sn_blklist != NULL)
			panic("ffs_snapshot: non-empty list");
		sn->sn_blklist = snapblklist;
		sn->sn_listsize = blkp - snapblklist;
		VI_UNLOCK(devvp);
	}
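	/*
	 * Editor's sketch of the preliminary list built above (derived from
	 * the code, not original commentary). The list is a counted array
	 * of logical block numbers that copy-on-write must never try to
	 * copy, kept in ascending lbn order:
	 *
	 *	snapblklist[0]	number of entries, including this word
	 *	then		the superblock's lbn
	 *	then		cg blocks below the summary area, the
	 *			summary (csum) blocks, and the remaining
	 *			cg blocks
	 */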
	/*
	 * Record snapshot inode. Since this is the newest snapshot,
	 * it must be placed at the end of the list.
	 */
	VI_LOCK(devvp);
	fs->fs_snapinum[snaploc] = ip->i_number;
	if (ip->i_nextsnap.tqe_prev != 0)
		panic("ffs_snapshot: %d already on list", ip->i_number);
	TAILQ_INSERT_TAIL(&sn->sn_head, ip, i_nextsnap);
	devvp->v_vflag |= VV_COPYONWRITE;
	VI_UNLOCK(devvp);
	ASSERT_VOP_LOCKED(vp, "ffs_snapshot vp");
out1:
	KASSERT((sn != NULL && sbp != NULL && error == 0) ||
	    (sn == NULL && sbp == NULL && error != 0),
	    ("email phk@ and mckusick@"));
	/*
	 * Resume operation on filesystem.
	 */
	vfs_write_resume(vp->v_mount);
	vn_start_write(NULL, &wrtmp, V_WAIT);
	if (collectsnapstats && starttime.tv_sec > 0) {
		nanotime(&endtime);
		timespecsub(&endtime, &starttime);
		printf("%s: suspended %ld.%03ld sec, redo %ld of %d\n",
		    vp->v_mount->mnt_stat.f_mntonname, (long)endtime.tv_sec,
		    endtime.tv_nsec / 1000000, redo, fs->fs_ncg);
	}
	if (sbp == NULL)
		goto out;
	/*
	 * Copy allocation information from all the snapshots in
	 * this snapshot and then expunge them from its view.
	 */
	TAILQ_FOREACH(xp, &sn->sn_head, i_nextsnap) {
		if (xp == ip)
			break;
		if (xp->i_ump->um_fstype == UFS1)
			error = expunge_ufs1(vp, xp, fs, snapacct_ufs1,
			    BLK_SNAP);
		else
			error = expunge_ufs2(vp, xp, fs, snapacct_ufs2,
			    BLK_SNAP);
		if (error == 0 && xp->i_effnlink == 0) {
			error = ffs_freefile(ump, copy_fs, vp, xp->i_number,
			    xp->i_mode);
		}
		if (error) {
			fs->fs_snapinum[snaploc] = 0;
			goto done;
		}
	}
	/*
	 * Allocate space for the full list of preallocated snapshot blocks.
	 */
	MALLOC(snapblklist, daddr_t *, snaplistsize * sizeof(daddr_t),
	    M_UFSMNT, M_WAITOK);
	ip->i_snapblklist = &snapblklist[1];
	/*
	 * Expunge the blocks used by the snapshots from the set of
	 * blocks marked as used in the snapshot bitmaps. Also, collect
	 * the list of allocated blocks in i_snapblklist.
	 */
	if (ip->i_ump->um_fstype == UFS1)
		error = expunge_ufs1(vp, ip, copy_fs, mapacct_ufs1, BLK_SNAP);
	else
		error = expunge_ufs2(vp, ip, copy_fs, mapacct_ufs2, BLK_SNAP);
	if (error) {
		fs->fs_snapinum[snaploc] = 0;
		FREE(snapblklist, M_UFSMNT);
		goto done;
	}
	if (snaplistsize < ip->i_snapblklist - snapblklist)
		panic("ffs_snapshot: list too small");
	snaplistsize = ip->i_snapblklist - snapblklist;
	snapblklist[0] = snaplistsize;
	ip->i_snapblklist = 0;
	/*
	 * Write out the list of allocated blocks to the end of the snapshot.
	 */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)snapblklist;
	aiov.iov_len = snaplistsize * sizeof(daddr_t);
	auio.uio_resid = aiov.iov_len;
	auio.uio_offset = ip->i_size;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;
	if ((error = VOP_WRITE(vp, &auio, IO_UNIT, td->td_ucred)) != 0) {
		fs->fs_snapinum[snaploc] = 0;
		FREE(snapblklist, M_UFSMNT);
		goto done;
	}
	/*
	 * Write the superblock and its summary information
	 * to the snapshot.
	 */
	blkno = fragstoblks(fs, fs->fs_csaddr);
	len = howmany(fs->fs_cssize, fs->fs_bsize);
	space = copy_fs->fs_csp;
	for (loc = 0; loc < len; loc++) {
		error = bread(vp, blkno + loc, fs->fs_bsize, KERNCRED, &nbp);
		if (error) {
			brelse(nbp);
			fs->fs_snapinum[snaploc] = 0;
			FREE(snapblklist, M_UFSMNT);
			goto done;
		}
		bcopy(space, nbp->b_data, fs->fs_bsize);
		space = (char *)space + fs->fs_bsize;
		bawrite(nbp);
	}
	/*
	 * As this is the newest list, it is the most inclusive, so
	 * should replace the previous list.
	 */
	VI_LOCK(devvp);
	space = sn->sn_blklist;
	sn->sn_blklist = snapblklist;
	sn->sn_listsize = snaplistsize;
	VI_UNLOCK(devvp);
	if (space != NULL)
		FREE(space, M_UFSMNT);
	/*
	 * If another process is currently writing the buffer containing
	 * the inode for this snapshot then a deadlock can occur. Drop
	 * the snapshot lock until the buffer has been written.
	 */
	VREF(vp);	/* Protect against ffs_snapgone() */
	VOP_UNLOCK(vp, 0);
	(void) bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int) fs->fs_bsize, NOCRED, &nbp);
	brelse(nbp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (ip->i_effnlink == 0)
		error = ENOENT;		/* Snapshot file unlinked */
	else
		vrele(vp);		/* Drop extra reference */
done:
	FREE(copy_fs->fs_csp, M_UFSMNT);
	bawrite(sbp);
out:
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (saved_nice > 0) {
		struct proc *p;

		p = td->td_proc;
		PROC_LOCK(p);
		sched_nice(p, saved_nice);
		PROC_UNLOCK(p);
	}
	UFS_LOCK(ump);
	if (fs->fs_active != 0) {
		FREE(fs->fs_active, M_DEVBUF);
		fs->fs_active = 0;
	}
	UFS_UNLOCK(ump);
	MNT_ILOCK(mp);
	mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
	MNT_IUNLOCK(mp);
	if (error)
		(void) ffs_truncate(vp, (off_t)0, 0, NOCRED, td);
	(void) ffs_syncvnode(vp, MNT_WAIT);
	if (error)
		vput(vp);
	else
		VOP_UNLOCK(vp, 0);
	vrele(nd.ni_dvp);
	vn_finished_write(wrtmp);
	process_deferred_inactive(mp);
	return (error);
}
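
/*
 * Editor's usage note (an assumption about the callers, not original
 * commentary): ffs_snapshot() is reached through ffs_mount() when a
 * snapshot is requested from userland, typically via mksnap_ffs(8) or
 * an update mount such as
 *
 *	mount -u -o snapshot /mnt/.snap/base /mnt
 */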

/*
 * Copy a cylinder group map. All the unallocated blocks are marked
 * BLK_NOCOPY so that the snapshot knows that it need not copy them
 * if they are later written. If passno is one, then this is a first
 * pass, so only setting needs to be done. If passno is 2, then this
 * is a revision to a previous pass which must be undone as the
 * replacement pass is done.
 */
static int
cgaccount(cg, vp, nbp, passno)
	int cg;
	struct vnode *vp;
	struct buf *nbp;
	int passno;
{
	struct buf *bp, *ibp;
	struct inode *ip;
	struct cg *cgp;
	struct fs *fs;
	ufs2_daddr_t base, numblks;
	int error, len, loc, indiroff;

	ip = VTOI(vp);
	fs = ip->i_fs;
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, KERNCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (EIO);
	}
	UFS_LOCK(ip->i_ump);
	ACTIVESET(fs, cg);
	UFS_UNLOCK(ip->i_ump);
	bcopy(bp->b_data, nbp->b_data, fs->fs_cgsize);
	if (fs->fs_cgsize < fs->fs_bsize)
		bzero(&nbp->b_data[fs->fs_cgsize],
		    fs->fs_bsize - fs->fs_cgsize);
	cgp = (struct cg *)nbp->b_data;
	bqrelse(bp);
	if (passno == 2)
		nbp->b_flags |= B_VALIDSUSPWRT;
	numblks = howmany(fs->fs_size, fs->fs_frag);
	len = howmany(fs->fs_fpg, fs->fs_frag);
	base = cgbase(fs, cg) / fs->fs_frag;
	if (base + len >= numblks)
		len = numblks - base - 1;
	loc = 0;
	if (base < NDADDR) {
		for ( ; loc < NDADDR; loc++) {
			if (ffs_isblock(fs, cg_blksfree(cgp), loc))
				DIP_SET(ip, i_db[loc], BLK_NOCOPY);
			else if (passno == 2 &&
			    DIP(ip, i_db[loc]) == BLK_NOCOPY)
				DIP_SET(ip, i_db[loc], 0);
			else if (passno == 1 &&
			    DIP(ip, i_db[loc]) == BLK_NOCOPY)
				panic("ffs_snapshot: lost direct block");
		}
	}
	error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(base + loc)),
	    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
	if (error) {
		return (error);
	}
	indiroff = (base + loc - NDADDR) % NINDIR(fs);
	for ( ; loc < len; loc++, indiroff++) {
		if (indiroff >= NINDIR(fs)) {
			if (passno == 2)
				ibp->b_flags |= B_VALIDSUSPWRT;
			bawrite(ibp);
			error = UFS_BALLOC(vp,
			    lblktosize(fs, (off_t)(base + loc)),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			if (error) {
				return (error);
			}
			indiroff = 0;
		}
		if (ip->i_ump->um_fstype == UFS1) {
			if (ffs_isblock(fs, cg_blksfree(cgp), loc))
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] =
				    BLK_NOCOPY;
			else if (passno == 2 && ((ufs1_daddr_t *)(ibp->b_data))
			    [indiroff] == BLK_NOCOPY)
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] = 0;
			else if (passno == 1 && ((ufs1_daddr_t *)(ibp->b_data))
			    [indiroff] == BLK_NOCOPY)
				panic("ffs_snapshot: lost indirect block");
			continue;
		}
		if (ffs_isblock(fs, cg_blksfree(cgp), loc))
			((ufs2_daddr_t *)(ibp->b_data))[indiroff] = BLK_NOCOPY;
		else if (passno == 2 &&
		    ((ufs2_daddr_t *)(ibp->b_data))[indiroff] == BLK_NOCOPY)
			((ufs2_daddr_t *)(ibp->b_data))[indiroff] = 0;
		else if (passno == 1 &&
		    ((ufs2_daddr_t *)(ibp->b_data))[indiroff] == BLK_NOCOPY)
			panic("ffs_snapshot: lost indirect block");
	}
	if (passno == 2)
		ibp->b_flags |= B_VALIDSUSPWRT;
	bdwrite(ibp);
	return (0);
}
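
/*
 * Editor's summary of the per-snapshot block pointer encoding used
 * throughout this file (derived from the code above and below, not
 * original commentary). Each pointer in the snapshot inode holds one of:
 *
 *	0		not yet copied; copy-on-write must act
 *	BLK_NOCOPY	free when the snapshot was taken; never copy
 *	BLK_SNAP	held by another snapshot; need not copy
 *	a real address	a block already copied (or claimed)
 */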

/*
 * Before expunging a snapshot inode, note all the
 * blocks that it claims with BLK_SNAP so that fsck will
 * be able to account for those blocks properly and so
 * that this snapshot knows that it need not copy them
 * if the other snapshot holding them is freed. This code
 * is reproduced once each for UFS1 and UFS2.
 */
static int
expunge_ufs1(snapvp, cancelip, fs, acctfunc, expungetype)
	struct vnode *snapvp;
	struct inode *cancelip;
	struct fs *fs;
	int (*acctfunc)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
	    struct fs *, ufs_lbn_t, int);
	int expungetype;
{
	int i, error, indiroff;
	ufs_lbn_t lbn, rlbn;
	ufs2_daddr_t len, blkno, numblks, blksperindir;
	struct ufs1_dinode *dip;
	struct thread *td = curthread;
	struct buf *bp;

	/*
	 * Prepare to expunge the inode. If its inode block has not
	 * yet been copied, then allocate and fill the copy.
	 */
	lbn = fragstoblks(fs, ino_to_fsba(fs, cancelip->i_number));
	blkno = 0;
	if (lbn < NDADDR) {
		blkno = VTOI(snapvp)->i_din1->di_db[lbn];
	} else {
		td->td_pflags |= TDP_COWINPROGRESS;
		error = ffs_balloc_ufs1(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, BA_METAONLY, &bp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			return (error);
		indiroff = (lbn - NDADDR) % NINDIR(fs);
		blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff];
		bqrelse(bp);
	}
	if (blkno != 0) {
		if ((error = bread(snapvp, lbn, fs->fs_bsize, KERNCRED, &bp)))
			return (error);
	} else {
		error = ffs_balloc_ufs1(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &bp);
		if (error)
			return (error);
		if ((error = readblock(snapvp, bp, lbn)) != 0)
			return (error);
	}
	/*
	 * Set a snapshot inode to be a zero length file, regular files
	 * or unlinked snapshots to be completely unallocated.
	 */
	dip = (struct ufs1_dinode *)bp->b_data +
	    ino_to_fsbo(fs, cancelip->i_number);
	if (expungetype == BLK_NOCOPY || cancelip->i_effnlink == 0)
		dip->di_mode = 0;
	dip->di_size = 0;
	dip->di_blocks = 0;
	dip->di_flags &= ~SF_SNAPSHOT;
	bzero(&dip->di_db[0], (NDADDR + NIADDR) * sizeof(ufs1_daddr_t));
	bdwrite(bp);
	/*
	 * Now go through and expunge all the blocks in the file
	 * using the function requested.
	 */
	numblks = howmany(cancelip->i_size, fs->fs_bsize);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din1->di_db[0],
	    &cancelip->i_din1->di_db[NDADDR], fs, 0, expungetype)))
		return (error);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din1->di_ib[0],
	    &cancelip->i_din1->di_ib[NIADDR], fs, -1, expungetype)))
		return (error);
	blksperindir = 1;
	lbn = -NDADDR;
	len = numblks - NDADDR;
	rlbn = NDADDR;
	for (i = 0; len > 0 && i < NIADDR; i++) {
		error = indiracct_ufs1(snapvp, ITOV(cancelip), i,
		    cancelip->i_din1->di_ib[i], lbn, rlbn, len,
		    blksperindir, fs, acctfunc, expungetype);
		if (error)
			return (error);
		blksperindir *= NINDIR(fs);
		lbn -= blksperindir + 1;
		len -= blksperindir;
		rlbn += blksperindir;
	}
	return (0);
}
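
/*
 * Editor's worked example of the descent loop above (illustrative, not
 * original commentary). With NDADDR direct pointers and NINDIR(fs)
 * pointers per indirect block, the iterations see:
 *
 *	i = 0: blksperindir = 1,            rlbn = NDADDR
 *	i = 1: blksperindir = NINDIR(fs),   rlbn = NDADDR + NINDIR(fs)
 *	i = 2: blksperindir = NINDIR(fs)^2, and so on,
 *
 * while lbn walks the conventional negative logical block numbers
 * under which UFS files its indirect blocks.
 */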

/*
 * Descend an indirect block chain for vnode cancelvp accounting for all
 * its indirect blocks in snapvp.
 */
static int
indiracct_ufs1(snapvp, cancelvp, level, blkno, lbn, rlbn, remblks,
	blksperindir, fs, acctfunc, expungetype)
	struct vnode *snapvp;
	struct vnode *cancelvp;
	int level;
	ufs1_daddr_t blkno;
	ufs_lbn_t lbn;
	ufs_lbn_t rlbn;
	ufs_lbn_t remblks;
	ufs_lbn_t blksperindir;
	struct fs *fs;
	int (*acctfunc)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *,
	    struct fs *, ufs_lbn_t, int);
	int expungetype;
{
	int error, num, i;
	ufs_lbn_t subblksperindir;
	struct indir indirs[NIADDR + 2];
	ufs1_daddr_t last, *bap;
	struct buf *bp;

	if (blkno == 0) {
		if (expungetype == BLK_NOCOPY)
			return (0);
		panic("indiracct_ufs1: missing indir");
	}
	if ((error = ufs_getlbns(cancelvp, rlbn, indirs, &num)) != 0)
		return (error);
	if (lbn != indirs[num - 1 - level].in_lbn || num < 2)
		panic("indiracct_ufs1: botched params");
	/*
	 * We have to expand bread here since it will deadlock looking
	 * up the block number for any blocks that are not in the cache.
	 */
	bp = getblk(cancelvp, lbn, fs->fs_bsize, 0, 0, 0);
	bp->b_blkno = fsbtodb(fs, blkno);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0 &&
	    (error = readblock(cancelvp, bp, fragstoblks(fs, blkno)))) {
		brelse(bp);
		return (error);
	}
	/*
	 * Account for the block pointers in this indirect block.
	 */
	last = howmany(remblks, blksperindir);
	if (last > NINDIR(fs))
		last = NINDIR(fs);
	MALLOC(bap, ufs1_daddr_t *, fs->fs_bsize, M_DEVBUF, M_WAITOK);
	bcopy(bp->b_data, (caddr_t)bap, fs->fs_bsize);
	bqrelse(bp);
	error = (*acctfunc)(snapvp, &bap[0], &bap[last], fs,
	    level == 0 ? rlbn : -1, expungetype);
	if (error || level == 0)
		goto out;
	/*
	 * Account for the block pointers in each of the indirect blocks
	 * in the levels below us.
	 */
	subblksperindir = blksperindir / NINDIR(fs);
	for (lbn++, level--, i = 0; i < last; i++) {
		error = indiracct_ufs1(snapvp, cancelvp, level, bap[i], lbn,
		    rlbn, remblks, subblksperindir, fs, acctfunc, expungetype);
		if (error)
			goto out;
		rlbn += blksperindir;
		lbn -= blksperindir;
		remblks -= blksperindir;
	}
out:
	FREE(bap, M_DEVBUF);
	return (error);
}

/*
 * Do both snap accounting and map accounting.
 */
static int
fullacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype)
	struct vnode *vp;
	ufs1_daddr_t *oldblkp, *lastblkp;
	struct fs *fs;
	ufs_lbn_t lblkno;
	int exptype;	/* BLK_SNAP or BLK_NOCOPY */
{
	int error;

	if ((error = snapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype)))
		return (error);
	return (mapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype));
}
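
/*
 * Editor's note (explanatory, not original commentary): fullacct
 * composes the two accounting passes, so a single traversal of an
 * unlinked file's block pointers both marks those blocks in the
 * snapshot (snapacct) and releases them in the snapshot's private
 * copy of the allocation bitmaps (mapacct).
 */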

/*
 * Identify a set of blocks allocated in a snapshot inode.
 */
static int
snapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, expungetype)
	struct vnode *vp;
	ufs1_daddr_t *oldblkp, *lastblkp;
	struct fs *fs;
	ufs_lbn_t lblkno;
	int expungetype;	/* BLK_SNAP or BLK_NOCOPY */
{
	struct inode *ip = VTOI(vp);
	ufs1_daddr_t blkno, *blkp;
	ufs_lbn_t lbn;
	struct buf *ibp;
	int error;

	for ( ; oldblkp < lastblkp; oldblkp++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY || blkno == BLK_SNAP)
			continue;
		lbn = fragstoblks(fs, blkno);
		if (lbn < NDADDR) {
			blkp = &ip->i_din1->di_db[lbn];
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		} else {
			error = ffs_balloc_ufs1(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			if (error)
				return (error);
			blkp = &((ufs1_daddr_t *)(ibp->b_data))
			    [(lbn - NDADDR) % NINDIR(fs)];
		}
		/*
		 * If we are expunging a snapshot vnode and we
		 * find a block marked BLK_NOCOPY, then it is
		 * one that has been allocated to this snapshot after
		 * we took our current snapshot and can be ignored.
		 */
		if (expungetype == BLK_SNAP && *blkp == BLK_NOCOPY) {
			if (lbn >= NDADDR)
				brelse(ibp);
		} else {
			if (*blkp != 0)
				panic("snapacct_ufs1: bad block");
			*blkp = expungetype;
			if (lbn >= NDADDR)
				bdwrite(ibp);
		}
	}
	return (0);
}

/*
 * Account for a set of blocks allocated in a snapshot inode.
 */
static int
mapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, expungetype)
	struct vnode *vp;
	ufs1_daddr_t *oldblkp, *lastblkp;
	struct fs *fs;
	ufs_lbn_t lblkno;
	int expungetype;
{
	ufs1_daddr_t blkno;
	struct inode *ip;
	ino_t inum;
	int acctit;

	ip = VTOI(vp);
	inum = ip->i_number;
	if (lblkno == -1)
		acctit = 0;
	else
		acctit = 1;
	for ( ; oldblkp < lastblkp; oldblkp++, lblkno++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY)
			continue;
		if (acctit && expungetype == BLK_SNAP && blkno != BLK_SNAP)
			*ip->i_snapblklist++ = lblkno;
		if (blkno == BLK_SNAP)
			blkno = blkstofrags(fs, lblkno);
		ffs_blkfree(ip->i_ump, fs, vp, blkno, fs->fs_bsize, inum);
	}
	return (0);
}

/*
 * Before expunging a snapshot inode, note all the
 * blocks that it claims with BLK_SNAP so that fsck will
 * be able to account for those blocks properly and so
 * that this snapshot knows that it need not copy them
 * if the other snapshot holding them is freed. This code
 * is reproduced once each for UFS1 and UFS2.
 */
static int
expunge_ufs2(snapvp, cancelip, fs, acctfunc, expungetype)
	struct vnode *snapvp;
	struct inode *cancelip;
	struct fs *fs;
	int (*acctfunc)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
	    struct fs *, ufs_lbn_t, int);
	int expungetype;
{
	int i, error, indiroff;
	ufs_lbn_t lbn, rlbn;
	ufs2_daddr_t len, blkno, numblks, blksperindir;
	struct ufs2_dinode *dip;
	struct thread *td = curthread;
	struct buf *bp;
	/*
	 * Prepare to expunge the inode. If its inode block has not
	 * yet been copied, then allocate and fill the copy.
	 */
	lbn = fragstoblks(fs, ino_to_fsba(fs, cancelip->i_number));
	blkno = 0;
	if (lbn < NDADDR) {
		blkno = VTOI(snapvp)->i_din2->di_db[lbn];
	} else {
		td->td_pflags |= TDP_COWINPROGRESS;
		error = ffs_balloc_ufs2(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, BA_METAONLY, &bp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			return (error);
		indiroff = (lbn - NDADDR) % NINDIR(fs);
		blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff];
		bqrelse(bp);
	}
	if (blkno != 0) {
		if ((error = bread(snapvp, lbn, fs->fs_bsize, KERNCRED, &bp)))
			return (error);
	} else {
		error = ffs_balloc_ufs2(snapvp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &bp);
		if (error)
			return (error);
		if ((error = readblock(snapvp, bp, lbn)) != 0)
			return (error);
	}
	/*
	 * Set a snapshot inode to be a zero length file, regular files
	 * to be completely unallocated.
	 */
	dip = (struct ufs2_dinode *)bp->b_data +
	    ino_to_fsbo(fs, cancelip->i_number);
	if (expungetype == BLK_NOCOPY)
		dip->di_mode = 0;
	dip->di_size = 0;
	dip->di_blocks = 0;
	dip->di_flags &= ~SF_SNAPSHOT;
	bzero(&dip->di_db[0], (NDADDR + NIADDR) * sizeof(ufs2_daddr_t));
	bdwrite(bp);
	/*
	 * Now go through and expunge all the blocks in the file
	 * using the function requested.
	 */
	numblks = howmany(cancelip->i_size, fs->fs_bsize);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din2->di_db[0],
	    &cancelip->i_din2->di_db[NDADDR], fs, 0, expungetype)))
		return (error);
	if ((error = (*acctfunc)(snapvp, &cancelip->i_din2->di_ib[0],
	    &cancelip->i_din2->di_ib[NIADDR], fs, -1, expungetype)))
		return (error);
	blksperindir = 1;
	lbn = -NDADDR;
	len = numblks - NDADDR;
	rlbn = NDADDR;
	for (i = 0; len > 0 && i < NIADDR; i++) {
		error = indiracct_ufs2(snapvp, ITOV(cancelip), i,
		    cancelip->i_din2->di_ib[i], lbn, rlbn, len,
		    blksperindir, fs, acctfunc, expungetype);
		if (error)
			return (error);
		blksperindir *= NINDIR(fs);
		lbn -= blksperindir + 1;
		len -= blksperindir;
		rlbn += blksperindir;
	}
	return (0);
}

/*
 * Descend an indirect block chain for vnode cancelvp accounting for all
 * its indirect blocks in snapvp.
 */
static int
indiracct_ufs2(snapvp, cancelvp, level, blkno, lbn, rlbn, remblks,
	blksperindir, fs, acctfunc, expungetype)
	struct vnode *snapvp;
	struct vnode *cancelvp;
	int level;
	ufs2_daddr_t blkno;
	ufs_lbn_t lbn;
	ufs_lbn_t rlbn;
	ufs_lbn_t remblks;
	ufs_lbn_t blksperindir;
	struct fs *fs;
	int (*acctfunc)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
	    struct fs *, ufs_lbn_t, int);
	int expungetype;
{
	int error, num, i;
	ufs_lbn_t subblksperindir;
	struct indir indirs[NIADDR + 2];
	ufs2_daddr_t last, *bap;
	struct buf *bp;

	if (blkno == 0) {
		if (expungetype == BLK_NOCOPY)
			return (0);
		panic("indiracct_ufs2: missing indir");
	}
	if ((error = ufs_getlbns(cancelvp, rlbn, indirs, &num)) != 0)
		return (error);
	if (lbn != indirs[num - 1 - level].in_lbn || num < 2)
		panic("indiracct_ufs2: botched params");
	/*
	 * We have to expand bread here since it will deadlock looking
	 * up the block number for any blocks that are not in the cache.
	 */
	bp = getblk(cancelvp, lbn, fs->fs_bsize, 0, 0, 0);
	bp->b_blkno = fsbtodb(fs, blkno);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0 &&
	    (error = readblock(cancelvp, bp, fragstoblks(fs, blkno)))) {
		brelse(bp);
		return (error);
	}
	/*
	 * Account for the block pointers in this indirect block.
	 */
	last = howmany(remblks, blksperindir);
	if (last > NINDIR(fs))
		last = NINDIR(fs);
	MALLOC(bap, ufs2_daddr_t *, fs->fs_bsize, M_DEVBUF, M_WAITOK);
	bcopy(bp->b_data, (caddr_t)bap, fs->fs_bsize);
	bqrelse(bp);
	error = (*acctfunc)(snapvp, &bap[0], &bap[last], fs,
	    level == 0 ? rlbn : -1, expungetype);
	if (error || level == 0)
		goto out;
	/*
	 * Account for the block pointers in each of the indirect blocks
	 * in the levels below us.
	 */
	subblksperindir = blksperindir / NINDIR(fs);
	for (lbn++, level--, i = 0; i < last; i++) {
		error = indiracct_ufs2(snapvp, cancelvp, level, bap[i], lbn,
		    rlbn, remblks, subblksperindir, fs, acctfunc, expungetype);
		if (error)
			goto out;
		rlbn += blksperindir;
		lbn -= blksperindir;
		remblks -= blksperindir;
	}
out:
	FREE(bap, M_DEVBUF);
	return (error);
}

/*
 * Do both snap accounting and map accounting.
 */
static int
fullacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype)
	struct vnode *vp;
	ufs2_daddr_t *oldblkp, *lastblkp;
	struct fs *fs;
	ufs_lbn_t lblkno;
	int exptype;	/* BLK_SNAP or BLK_NOCOPY */
{
	int error;

	if ((error = snapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype)))
		return (error);
	return (mapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype));
}

/*
 * Identify a set of blocks allocated in a snapshot inode.
 */
static int
snapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, expungetype)
	struct vnode *vp;
	ufs2_daddr_t *oldblkp, *lastblkp;
	struct fs *fs;
	ufs_lbn_t lblkno;
	int expungetype;	/* BLK_SNAP or BLK_NOCOPY */
{
	struct inode *ip = VTOI(vp);
	ufs2_daddr_t blkno, *blkp;
	ufs_lbn_t lbn;
	struct buf *ibp;
	int error;

	for ( ; oldblkp < lastblkp; oldblkp++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY || blkno == BLK_SNAP)
			continue;
		lbn = fragstoblks(fs, blkno);
		if (lbn < NDADDR) {
			blkp = &ip->i_din2->di_db[lbn];
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		} else {
			error = ffs_balloc_ufs2(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			if (error)
				return (error);
			blkp = &((ufs2_daddr_t *)(ibp->b_data))
			    [(lbn - NDADDR) % NINDIR(fs)];
		}
		/*
		 * If we are expunging a snapshot vnode and we
		 * find a block marked BLK_NOCOPY, then it is
		 * one that has been allocated to this snapshot after
		 * we took our current snapshot and can be ignored.
		 */
		if (expungetype == BLK_SNAP && *blkp == BLK_NOCOPY) {
			if (lbn >= NDADDR)
				brelse(ibp);
		} else {
			if (*blkp != 0)
				panic("snapacct_ufs2: bad block");
			*blkp = expungetype;
			if (lbn >= NDADDR)
				bdwrite(ibp);
		}
	}
	return (0);
}

/*
 * Account for a set of blocks allocated in a snapshot inode.
 */
static int
mapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, expungetype)
	struct vnode *vp;
	ufs2_daddr_t *oldblkp, *lastblkp;
	struct fs *fs;
	ufs_lbn_t lblkno;
	int expungetype;
{
	ufs2_daddr_t blkno;
	struct inode *ip;
	ino_t inum;
	int acctit;

	ip = VTOI(vp);
	inum = ip->i_number;
	if (lblkno == -1)
		acctit = 0;
	else
		acctit = 1;
	for ( ; oldblkp < lastblkp; oldblkp++, lblkno++) {
		blkno = *oldblkp;
		if (blkno == 0 || blkno == BLK_NOCOPY)
			continue;
		if (acctit && expungetype == BLK_SNAP && blkno != BLK_SNAP)
			*ip->i_snapblklist++ = lblkno;
		if (blkno == BLK_SNAP)
			blkno = blkstofrags(fs, lblkno);
		ffs_blkfree(ip->i_ump, fs, vp, blkno, fs->fs_bsize, inum);
	}
	return (0);
}

/*
 * Decrement extra reference on snapshot when last name is removed.
 * It will not be freed until the last open reference goes away.
 */
void
ffs_snapgone(ip)
	struct inode *ip;
{
	struct inode *xp;
	struct fs *fs;
	int snaploc;
	struct snapdata *sn;
	struct ufsmount *ump;

	/*
	 * Find snapshot in incore list.
	 */
	xp = NULL;
	sn = ip->i_devvp->v_rdev->si_snapdata;
	if (sn != NULL)
		TAILQ_FOREACH(xp, &sn->sn_head, i_nextsnap)
			if (xp == ip)
				break;
	if (xp != NULL)
		vrele(ITOV(ip));
	else if (snapdebug)
		printf("ffs_snapgone: lost snapshot vnode %d\n",
		    ip->i_number);
	/*
	 * Delete snapshot inode from superblock. Keep list dense.
	 */
	fs = ip->i_fs;
	ump = ip->i_ump;
	UFS_LOCK(ump);
	for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++)
		if (fs->fs_snapinum[snaploc] == ip->i_number)
			break;
	if (snaploc < FSMAXSNAP) {
		for (snaploc++; snaploc < FSMAXSNAP; snaploc++) {
			if (fs->fs_snapinum[snaploc] == 0)
				break;
			fs->fs_snapinum[snaploc - 1] = fs->fs_snapinum[snaploc];
		}
		fs->fs_snapinum[snaploc - 1] = 0;
	}
	UFS_UNLOCK(ump);
}

/*
 * Prepare a snapshot file for being removed.
 */
void
ffs_snapremove(vp)
	struct vnode *vp;
{
	struct inode *ip;
	struct vnode *devvp;
	struct buf *ibp;
	struct fs *fs;
	struct thread *td = curthread;
	ufs2_daddr_t numblks, blkno, dblk;
	int error, loc, last;
	struct snapdata *sn;

	ip = VTOI(vp);
	fs = ip->i_fs;
	devvp = ip->i_devvp;
	/*
	 * If active, delete from incore list (this snapshot may
	 * already have been in the process of being deleted, so
	 * would not have been active).
	 *
	 * Clear copy-on-write flag if last snapshot.
	 */
	VI_LOCK(devvp);
	if (ip->i_nextsnap.tqe_prev != 0) {
		sn = devvp->v_rdev->si_snapdata;
		TAILQ_REMOVE(&sn->sn_head, ip, i_nextsnap);
		ip->i_nextsnap.tqe_prev = 0;
		VI_UNLOCK(devvp);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
		VI_LOCK(vp);
		KASSERT(vp->v_vnlock == &sn->sn_lock,
		    ("ffs_snapremove: lost lock mutation"));
		vp->v_vnlock = &vp->v_lock;
		VI_UNLOCK(vp);
		VI_LOCK(devvp);
		lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
		try_free_snapdata(devvp, td);
	} else
		VI_UNLOCK(devvp);
	/*
	 * Clear all BLK_NOCOPY fields. Pass any block claims to other
	 * snapshots that want them (see ffs_snapblkfree below).
	 */
	for (blkno = 1; blkno < NDADDR; blkno++) {
		dblk = DIP(ip, i_db[blkno]);
		if (dblk == 0)
			continue;
		if (dblk == BLK_NOCOPY || dblk == BLK_SNAP)
			DIP_SET(ip, i_db[blkno], 0);
		else if ((dblk == blkstofrags(fs, blkno) &&
		     ffs_snapblkfree(fs, ip->i_devvp, dblk, fs->fs_bsize,
		     ip->i_number))) {
			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) -
			    btodb(fs->fs_bsize));
			DIP_SET(ip, i_db[blkno], 0);
		}
	}
	numblks = howmany(ip->i_size, fs->fs_bsize);
	for (blkno = NDADDR; blkno < numblks; blkno += NINDIR(fs)) {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)blkno),
		    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
		if (error)
			continue;
		if (fs->fs_size - blkno > NINDIR(fs))
			last = NINDIR(fs);
		else
			last = fs->fs_size - blkno;
		for (loc = 0; loc < last; loc++) {
			if (ip->i_ump->um_fstype == UFS1) {
				dblk = ((ufs1_daddr_t *)(ibp->b_data))[loc];
				if (dblk == 0)
					continue;
				if (dblk == BLK_NOCOPY || dblk == BLK_SNAP)
					((ufs1_daddr_t *)(ibp->b_data))[loc] = 0;
				else if ((dblk == blkstofrags(fs, blkno) &&
				     ffs_snapblkfree(fs, ip->i_devvp, dblk,
				     fs->fs_bsize, ip->i_number))) {
					ip->i_din1->di_blocks -=
					    btodb(fs->fs_bsize);
					((ufs1_daddr_t *)(ibp->b_data))[loc] = 0;
				}
				continue;
			}
			dblk = ((ufs2_daddr_t *)(ibp->b_data))[loc];
			if (dblk == 0)
				continue;
			if (dblk == BLK_NOCOPY || dblk == BLK_SNAP)
				((ufs2_daddr_t *)(ibp->b_data))[loc] = 0;
			else if ((dblk == blkstofrags(fs, blkno) &&
			     ffs_snapblkfree(fs, ip->i_devvp, dblk,
			     fs->fs_bsize, ip->i_number))) {
				ip->i_din2->di_blocks -= btodb(fs->fs_bsize);
				((ufs2_daddr_t *)(ibp->b_data))[loc] = 0;
			}
		}
		bawrite(ibp);
	}
	/*
	 * Clear snapshot flag and drop reference.
	 */
	ip->i_flags &= ~SF_SNAPSHOT;
	DIP_SET(ip, i_flags, ip->i_flags);
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
#ifdef QUOTA
	/*
	 * Reenable disk quotas for ex-snapshot file.
	 */
	if (!getinoquota(ip))
		(void) chkdq(ip, DIP(ip, i_blocks), KERNCRED, FORCE);
#endif
}
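
/*
 * Editor's example for the claimed-block invariant documented in the
 * comment below (illustrative, not original commentary): if a
 * snapshot's logical block 5000 holds device address
 * blkstofrags(fs, 5000), the snapshot claimed that block when it was
 * freed; a genuinely copied block was allocated from a BLK_NOCOPY
 * slot and so can never land on its own logical address.
 */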

/*
 * Notification that a block is being freed. Return zero if the free
 * should be allowed to proceed. Return non-zero if the snapshot file
 * wants to claim the block. The block will be claimed if it is an
 * uncopied part of one of the snapshots. It will be freed if it is
 * either a BLK_NOCOPY or has already been copied in all of the snapshots.
 * If a fragment is being freed, then all snapshots that care about
 * it must make a copy since a snapshot file can only claim full sized
 * blocks. Note that if more than one snapshot file maps the block,
 * we can pick one at random to claim it. Since none of the snapshots
 * can change, we are assured that they will all see the same unmodified
 * image. When deleting a snapshot file (see ffs_snapremove above), we
 * must push any of these claimed blocks to one of the other snapshots
 * that maps it. These claimed blocks are easily identified as they will
 * have a block number equal to their logical block number within the
 * snapshot. A copied block can never have this property because they
 * must always have been allocated from a BLK_NOCOPY location.
 */
int
ffs_snapblkfree(fs, devvp, bno, size, inum)
	struct fs *fs;
	struct vnode *devvp;
	ufs2_daddr_t bno;
	long size;
	ino_t inum;
{
	struct buf *ibp, *cbp, *savedcbp = 0;
	struct thread *td = curthread;
	struct inode *ip;
	struct vnode *vp = NULL;
	ufs_lbn_t lbn;
	ufs2_daddr_t blkno;
	int indiroff = 0, error = 0, claimedblk = 0;
	struct snapdata *sn;

	lbn = fragstoblks(fs, bno);
retry:
	VI_LOCK(devvp);
	sn = devvp->v_rdev->si_snapdata;
	if (sn == NULL) {
		VI_UNLOCK(devvp);
		return (0);
	}
	if (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
	    VI_MTX(devvp)) != 0)
		goto retry;
	TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) {
		vp = ITOV(ip);
		/*
		 * Lookup block being written.
		 */
		if (lbn < NDADDR) {
			blkno = DIP(ip, i_db[lbn]);
		} else {
			td->td_pflags |= TDP_COWINPROGRESS;
			error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			td->td_pflags &= ~TDP_COWINPROGRESS;
			if (error)
				break;
			indiroff = (lbn - NDADDR) % NINDIR(fs);
			if (ip->i_ump->um_fstype == UFS1)
				blkno = ((ufs1_daddr_t *)(ibp->b_data))[indiroff];
			else
				blkno = ((ufs2_daddr_t *)(ibp->b_data))[indiroff];
		}
		/*
		 * Check to see if block needs to be copied.
		 */
		if (blkno == 0) {
			/*
			 * A block that we map is being freed. If it has not
			 * been claimed yet, we will claim or copy it (below).
			 */
			claimedblk = 1;
		} else if (blkno == BLK_SNAP) {
			/*
			 * No previous snapshot claimed the block,
			 * so it will be freed and become a BLK_NOCOPY
			 * (don't care) for us.
			 */
			if (claimedblk)
				panic("snapblkfree: inconsistent block type");
			if (lbn < NDADDR) {
				DIP_SET(ip, i_db[lbn], BLK_NOCOPY);
				ip->i_flag |= IN_CHANGE | IN_UPDATE;
			} else if (ip->i_ump->um_fstype == UFS1) {
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] =
				    BLK_NOCOPY;
				bdwrite(ibp);
			} else {
				((ufs2_daddr_t *)(ibp->b_data))[indiroff] =
				    BLK_NOCOPY;
				bdwrite(ibp);
			}
			continue;
		} else /* BLK_NOCOPY or default */ {
			/*
			 * If the snapshot has already copied the block
			 * (default), or does not care about the block,
			 * it is not needed.
			 */
			if (lbn >= NDADDR)
				bqrelse(ibp);
			continue;
		}
		/*
		 * If this is a full size block, we will just grab it
		 * and assign it to the snapshot inode. Otherwise we
		 * will proceed to copy it. See explanation for this
		 * routine as to why only a single snapshot needs to
		 * claim this block.
		 */
		if (size == fs->fs_bsize) {
#ifdef DEBUG
			if (snapdebug)
				printf("%s %d lbn %jd from inum %d\n",
				    "Grabonremove: snapino", ip->i_number,
				    (intmax_t)lbn, inum);
#endif
			if (lbn < NDADDR) {
				DIP_SET(ip, i_db[lbn], bno);
			} else if (ip->i_ump->um_fstype == UFS1) {
				((ufs1_daddr_t *)(ibp->b_data))[indiroff] = bno;
				bdwrite(ibp);
			} else {
				((ufs2_daddr_t *)(ibp->b_data))[indiroff] = bno;
				bdwrite(ibp);
			}
			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + btodb(size));
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
			return (1);
		}
		if (lbn >= NDADDR)
			bqrelse(ibp);
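		/*
		 * Editor's note on TDP_COWINPROGRESS (an assumption, not
		 * original commentary): the flag set around the allocation
		 * below appears to be checked by the copy-on-write path so
		 * that allocations made on behalf of a snapshot do not
		 * recursively trigger another copy-on-write, which could
		 * otherwise deadlock.
		 */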
		/*
		 * Allocate the block into which to do the copy. Note that
		 * this allocation will never require any additional
		 * allocations for the snapshot inode.
		 */
		td->td_pflags |= TDP_COWINPROGRESS;
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &cbp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			break;
#ifdef DEBUG
		if (snapdebug)
			printf("%s%d lbn %jd %s %d size %ld to blkno %jd\n",
			    "Copyonremove: snapino ", ip->i_number,
			    (intmax_t)lbn, "for inum", inum, size,
			    (intmax_t)cbp->b_blkno);
#endif
		/*
		 * If we have already read the old block contents, then
		 * simply copy them to the new block. Note that we need
		 * to synchronously write snapshots that have not been
		 * unlinked, and hence will be visible after a crash,
		 * to ensure their integrity.
		 */
		if (savedcbp != 0) {
			bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize);
			bawrite(cbp);
			if (dopersistence && ip->i_effnlink > 0)
				(void) ffs_syncvnode(vp, MNT_WAIT);
			continue;
		}
		/*
		 * Otherwise, read the old block contents into the buffer.
		 */
		if ((error = readblock(vp, cbp, lbn)) != 0) {
			bzero(cbp->b_data, fs->fs_bsize);
			bawrite(cbp);
			if (dopersistence && ip->i_effnlink > 0)
				(void) ffs_syncvnode(vp, MNT_WAIT);
			break;
		}
		savedcbp = cbp;
	}
	/*
	 * Note that we need to synchronously write snapshots that
	 * have not been unlinked, and hence will be visible after
	 * a crash, to ensure their integrity.
	 */
	if (savedcbp) {
		vp = savedcbp->b_vp;
		bawrite(savedcbp);
		if (dopersistence && VTOI(vp)->i_effnlink > 0)
			(void) ffs_syncvnode(vp, MNT_WAIT);
	}
	/*
	 * If we have been unable to allocate a block in which to do
	 * the copy, then return non-zero so that the fragment will
	 * not be freed. Although space will be lost, the snapshot
	 * will stay consistent.
	 */
	lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
	return (error);
}

/*
 * Associate snapshot files when mounting.
 */
void
ffs_snapshot_mount(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *devvp = ump->um_devvp;
	struct fs *fs = ump->um_fs;
	struct thread *td = curthread;
	struct snapdata *sn;
	struct vnode *vp;
	struct vnode *lastvp;
	struct inode *ip;
	struct uio auio;
	struct iovec aiov;
	void *snapblklist;
	char *reason;
	daddr_t snaplistsize;
	int error, snaploc, loc;

	/*
	 * XXX The following needs to be set before ffs_truncate or
	 * VOP_READ can be called.
	 */
	mp->mnt_stat.f_iosize = fs->fs_bsize;

/*
 * Associate snapshot files when mounting.
 */
void
ffs_snapshot_mount(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *devvp = ump->um_devvp;
	struct fs *fs = ump->um_fs;
	struct thread *td = curthread;
	struct snapdata *sn;
	struct vnode *vp;
	struct vnode *lastvp;
	struct inode *ip;
	struct uio auio;
	struct iovec aiov;
	void *snapblklist;
	char *reason;
	daddr_t snaplistsize;
	int error, snaploc, loc;

	/*
	 * XXX The following needs to be set before ffs_truncate or
	 * VOP_READ can be called.
	 */
	mp->mnt_stat.f_iosize = fs->fs_bsize;
	/*
	 * Process each snapshot listed in the superblock.
	 */
	vp = NULL;
	lastvp = NULL;
	sn = devvp->v_rdev->si_snapdata;
	for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++) {
		if (fs->fs_snapinum[snaploc] == 0)
			break;
		if ((error = ffs_vget(mp, fs->fs_snapinum[snaploc],
		    LK_EXCLUSIVE, &vp)) != 0) {
			printf("ffs_snapshot_mount: vget failed %d\n", error);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flags & SF_SNAPSHOT) == 0 || ip->i_size ==
		    lblktosize(fs, howmany(fs->fs_size, fs->fs_frag))) {
			if ((ip->i_flags & SF_SNAPSHOT) == 0) {
				reason = "non-snapshot";
			} else {
				reason = "old format snapshot";
				(void)ffs_truncate(vp, (off_t)0, 0, NOCRED, td);
				(void)ffs_syncvnode(vp, MNT_WAIT);
			}
			printf("ffs_snapshot_mount: %s inode %d\n",
			    reason, fs->fs_snapinum[snaploc]);
			vput(vp);
			vp = NULL;
			for (loc = snaploc + 1; loc < FSMAXSNAP; loc++) {
				if (fs->fs_snapinum[loc] == 0)
					break;
				fs->fs_snapinum[loc - 1] = fs->fs_snapinum[loc];
			}
			fs->fs_snapinum[loc - 1] = 0;
			snaploc--;
			continue;
		}
		/*
		 * If there already exist snapshots on this filesystem, grab a
		 * reference to their shared lock. If this is the first snapshot
		 * on this filesystem, we need to allocate a lock for the
		 * snapshots to share. In either case, acquire the snapshot
		 * lock and give up our original private lock.
		 */
		VI_LOCK(devvp);
		if (sn != NULL) {
			VI_UNLOCK(devvp);
			VI_LOCK(vp);
			vp->v_vnlock = &sn->sn_lock;
		} else {
			VI_UNLOCK(devvp);
			sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO);
			TAILQ_INIT(&sn->sn_head);
			lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT,
			    LK_CANRECURSE | LK_NOSHARE);
			VI_LOCK(vp);
			vp->v_vnlock = &sn->sn_lock;
			devvp->v_rdev->si_snapdata = sn;
		}
		lockmgr(vp->v_vnlock, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY,
		    VI_MTX(vp));
		lockmgr(&vp->v_lock, LK_RELEASE, NULL);
		/*
		 * Link it onto the active snapshot list.
		 */
		VI_LOCK(devvp);
		if (ip->i_nextsnap.tqe_prev != 0)
			panic("ffs_snapshot_mount: %d already on list",
			    ip->i_number);
		else
			TAILQ_INSERT_TAIL(&sn->sn_head, ip, i_nextsnap);
		vp->v_vflag |= VV_SYSTEM;
		VI_UNLOCK(devvp);
		VOP_UNLOCK(vp, 0);
		lastvp = vp;
	}
	vp = lastvp;
	/*
	 * No usable snapshots found.
	 */
	if (vp == NULL)
		return;
	/*
	 * Allocate the space for the block hints list. We always want to
	 * use the list from the newest snapshot.
	 */
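	/*
	 * The hints list is stored just past the last block of the
	 * filesystem, preceded by its length. After the length word is
	 * read, the offset is backed up so that the word is re-read into
	 * entry 0 of the in-core list; the binary searches over this
	 * list therefore begin at entry 1.
	 */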
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&snaplistsize;
	aiov.iov_len = sizeof(snaplistsize);
	auio.uio_resid = aiov.iov_len;
	auio.uio_offset =
	    lblktosize(fs, howmany(fs->fs_size, fs->fs_frag));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((error = VOP_READ(vp, &auio, IO_UNIT, td->td_ucred)) != 0) {
		printf("ffs_snapshot_mount: read_1 failed %d\n", error);
		VOP_UNLOCK(vp, 0);
		return;
	}
	MALLOC(snapblklist, void *, snaplistsize * sizeof(daddr_t),
	    M_UFSMNT, M_WAITOK);
	auio.uio_iovcnt = 1;
	aiov.iov_base = snapblklist;
	aiov.iov_len = snaplistsize * sizeof(daddr_t);
	auio.uio_resid = aiov.iov_len;
	auio.uio_offset -= sizeof(snaplistsize);
	if ((error = VOP_READ(vp, &auio, IO_UNIT, td->td_ucred)) != 0) {
		printf("ffs_snapshot_mount: read_2 failed %d\n", error);
		VOP_UNLOCK(vp, 0);
		FREE(snapblklist, M_UFSMNT);
		return;
	}
	VOP_UNLOCK(vp, 0);
	VI_LOCK(devvp);
	ASSERT_VOP_LOCKED(devvp, "ffs_snapshot_mount");
	sn->sn_listsize = snaplistsize;
	sn->sn_blklist = (daddr_t *)snapblklist;
	devvp->v_vflag |= VV_COPYONWRITE;
	VI_UNLOCK(devvp);
}

/*
 * Disassociate snapshot files when unmounting.
 */
void
ffs_snapshot_unmount(mp)
	struct mount *mp;
{
	struct vnode *devvp = VFSTOUFS(mp)->um_devvp;
	struct snapdata *sn;
	struct inode *xp;
	struct vnode *vp;
	struct thread *td = curthread;

	VI_LOCK(devvp);
	sn = devvp->v_rdev->si_snapdata;
	while (sn != NULL && (xp = TAILQ_FIRST(&sn->sn_head)) != NULL) {
		vp = ITOV(xp);
		TAILQ_REMOVE(&sn->sn_head, xp, i_nextsnap);
		xp->i_nextsnap.tqe_prev = 0;
		lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE,
		    VI_MTX(devvp));
		VI_LOCK(vp);
		lockmgr(&vp->v_lock, LK_INTERLOCK | LK_EXCLUSIVE, VI_MTX(vp));
		VI_LOCK(vp);
		KASSERT(vp->v_vnlock == &sn->sn_lock,
		    ("ffs_snapshot_unmount: lost lock mutation"));
		vp->v_vnlock = &vp->v_lock;
		VI_UNLOCK(vp);
		lockmgr(&vp->v_lock, LK_RELEASE, NULL);
		lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
		if (xp->i_effnlink > 0)
			vrele(vp);
		VI_LOCK(devvp);
		sn = devvp->v_rdev->si_snapdata;
	}
	try_free_snapdata(devvp, td);
	ASSERT_VOP_LOCKED(devvp, "ffs_snapshot_unmount");
}
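
/*
 * Both ffs_bp_snapblk() below and ffs_copyonwrite() decide whether a
 * block is already safe by binary-searching the sorted preallocated
 * block list read in above. A minimal userland sketch of that search
 * follows; it is illustrative only (hence not compiled) and assumes
 * the convention noted above that entry 0 holds the list size, so the
 * search runs over entries 1 through listsize - 1.
 */
#if 0
#include <stdint.h>

/* Return nonzero if lbn occurs in the sorted list[1..listsize-1]. */
static int
snapblk_lookup(const int64_t *list, int64_t listsize, int64_t lbn)
{
	int64_t lower, upper, mid;

	lower = 1;
	upper = listsize - 1;
	while (lower <= upper) {
		mid = (lower + upper) / 2;
		if (list[mid] == lbn)
			return (1);
		if (list[mid] < lbn)
			lower = mid + 1;
		else
			upper = mid - 1;
	}
	return (0);
}
#endif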

/*
 * Check whether the block written by the given buffer belongs to a
 * snapshot, in which case the buffer must only be locked after snaplk
 * in the lock order. devvp must be locked on entry and is left locked
 * on exit.
 */
static int
ffs_bp_snapblk(devvp, bp)
	struct vnode *devvp;
	struct buf *bp;
{
	struct snapdata *sn;
	struct fs *fs;
	ufs2_daddr_t lbn, *snapblklist;
	int lower, upper, mid;

	ASSERT_VI_LOCKED(devvp, "ffs_bp_snapblk");
	KASSERT(devvp->v_type == VCHR, ("Not a device %p", devvp));
	sn = devvp->v_rdev->si_snapdata;
	if (sn == NULL || TAILQ_FIRST(&sn->sn_head) == NULL)
		return (0);
	fs = TAILQ_FIRST(&sn->sn_head)->i_fs;
	lbn = fragstoblks(fs, dbtofsb(fs, bp->b_blkno));
	snapblklist = sn->sn_blklist;
	upper = sn->sn_listsize - 1;
	lower = 1;
	while (lower <= upper) {
		mid = (lower + upper) / 2;
		if (snapblklist[mid] == lbn)
			break;
		if (snapblklist[mid] < lbn)
			lower = mid + 1;
		else
			upper = mid - 1;
	}
	if (lower <= upper)
		return (1);
	return (0);
}

void
ffs_bdflush(bo, bp)
	struct bufobj *bo;
	struct buf *bp;
{
	struct thread *td;
	struct vnode *vp, *devvp;
	struct buf *nbp;
	int bp_bdskip;

	if (bo->bo_dirty.bv_cnt <= dirtybufthresh)
		return;

	td = curthread;
	vp = bp->b_vp;
	devvp = bo->__bo_vnode;
	KASSERT(vp == devvp, ("devvp != vp %p %p", bo, bp));

	VI_LOCK(devvp);
	bp_bdskip = ffs_bp_snapblk(devvp, bp);
	if (bp_bdskip)
		bdwriteskip++;
	VI_UNLOCK(devvp);
	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10 && !bp_bdskip) {
		(void) VOP_FSYNC(vp, MNT_NOWAIT, td);
		altbufferflushes++;
	} else {
		BO_LOCK(bo);
		/*
		 * Try to find a buffer to flush.
		 */
		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
			    BUF_LOCK(nbp,
			    LK_EXCLUSIVE | LK_NOWAIT, NULL))
				continue;
			if (bp == nbp)
				panic("ffs_bdflush: found ourselves");
			BO_UNLOCK(bo);
			/*
			 * Don't call buf_countdeps() with the bo lock
			 * held.
			 */
			if (buf_countdeps(nbp, 0)) {
				BO_LOCK(bo);
				BUF_UNLOCK(nbp);
				continue;
			}
			if (bp_bdskip) {
				VI_LOCK(devvp);
				if (!ffs_bp_snapblk(vp, nbp)) {
					if (BO_MTX(bo) != VI_MTX(vp)) {
						VI_UNLOCK(devvp);
						BO_LOCK(bo);
					}
					BUF_UNLOCK(nbp);
					continue;
				}
				VI_UNLOCK(devvp);
			}
			if (nbp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(nbp);
			} else {
				bremfree(nbp);
				bawrite(nbp);
			}
			dirtybufferflushes++;
			break;
		}
		if (nbp == NULL)
			BO_UNLOCK(bo);
	}
}
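
/*
 * A hedged sketch (not compiled) of how the copy-on-write hook below
 * is reached: ffs_snapshot_mount() sets VV_COPYONWRITE on the device
 * vnode, and the device write path is expected to check that flag and
 * call ffs_copyonwrite() before letting the write proceed. The caller
 * shown here is illustrative, not the actual strategy code.
 */
#if 0
	if (bp->b_iocmd == BIO_WRITE &&
	    (devvp->v_vflag & VV_COPYONWRITE) != 0 &&
	    (error = ffs_copyonwrite(devvp, bp)) != 0) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}
#endif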

/*
 * Check for need to copy block that is about to be written,
 * copying the block if necessary.
 */
int
ffs_copyonwrite(devvp, bp)
	struct vnode *devvp;
	struct buf *bp;
{
	struct snapdata *sn;
	struct buf *ibp, *cbp, *savedcbp = NULL;
	struct thread *td = curthread;
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp = NULL;
	ufs2_daddr_t lbn, blkno, *snapblklist;
	int lower, upper, mid, indiroff, error = 0;
	int launched_async_io, prev_norunningbuf;
	long saved_runningbufspace;

	if (devvp != bp->b_vp && (VTOI(bp->b_vp)->i_flags & SF_SNAPSHOT) != 0)
		return (0);		/* Update on a snapshot file */
	if (td->td_pflags & TDP_COWINPROGRESS)
		panic("ffs_copyonwrite: recursive call");
	/*
	 * First check to see if it is in the preallocated list.
	 * By doing this check we avoid several potential deadlocks.
	 */
	VI_LOCK(devvp);
	sn = devvp->v_rdev->si_snapdata;
	if (sn == NULL ||
	    TAILQ_EMPTY(&sn->sn_head)) {
		VI_UNLOCK(devvp);
		return (0);		/* No snapshot */
	}
	ip = TAILQ_FIRST(&sn->sn_head);
	fs = ip->i_fs;
	lbn = fragstoblks(fs, dbtofsb(fs, bp->b_blkno));
	snapblklist = sn->sn_blklist;
	upper = sn->sn_listsize - 1;
	lower = 1;
	while (lower <= upper) {
		mid = (lower + upper) / 2;
		if (snapblklist[mid] == lbn)
			break;
		if (snapblklist[mid] < lbn)
			lower = mid + 1;
		else
			upper = mid - 1;
	}
	if (lower <= upper) {
		VI_UNLOCK(devvp);
		return (0);
	}
	launched_async_io = 0;
	prev_norunningbuf = td->td_pflags & TDP_NORUNNINGBUF;
	/*
	 * Since I/O on bp isn't yet in progress and it may be blocked
	 * for a long time waiting on snaplk, back it out of
	 * runningbufspace, possibly waking other threads waiting for space.
	 */
	saved_runningbufspace = bp->b_runningbufspace;
	if (saved_runningbufspace != 0)
		runningbufwakeup(bp);
	/*
	 * Not in the precomputed list, so check the snapshots.
	 */
	while (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
	    VI_MTX(devvp)) != 0) {
		VI_LOCK(devvp);
		sn = devvp->v_rdev->si_snapdata;
		if (sn == NULL ||
		    TAILQ_EMPTY(&sn->sn_head)) {
			VI_UNLOCK(devvp);
			if (saved_runningbufspace != 0) {
				bp->b_runningbufspace = saved_runningbufspace;
				atomic_add_int(&runningbufspace,
				    bp->b_runningbufspace);
			}
			return (0);		/* Snapshot gone */
		}
	}
	TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) {
		vp = ITOV(ip);
		/*
		 * We ensure that everything of our own that needs to be
		 * copied will be done at the time that ffs_snapshot is
		 * called. Thus we can skip the check here which can
		 * deadlock in doing the lookup in UFS_BALLOC.
		 */
		if (bp->b_vp == vp)
			continue;
		/*
		 * Check to see if block needs to be copied. We do not have
		 * to hold the snapshot lock while doing this lookup as it
		 * will never require any additional allocations for the
		 * snapshot inode.
		 */
		if (lbn < NDADDR) {
			blkno = DIP(ip, i_db[lbn]);
		} else {
			td->td_pflags |= TDP_COWINPROGRESS | TDP_NORUNNINGBUF;
			error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
			    fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp);
			td->td_pflags &= ~TDP_COWINPROGRESS;
			if (error)
				break;
			indiroff = (lbn - NDADDR) % NINDIR(fs);
			if (ip->i_ump->um_fstype == UFS1)
				blkno = ((ufs1_daddr_t *)(ibp->b_data))[indiroff];
			else
				blkno = ((ufs2_daddr_t *)(ibp->b_data))[indiroff];
			bqrelse(ibp);
		}
#ifdef INVARIANTS
		if (blkno == BLK_SNAP && bp->b_lblkno >= 0)
			panic("ffs_copyonwrite: bad copy block");
#endif
		if (blkno != 0)
			continue;
		/*
		 * Allocate the block into which to do the copy. Since
		 * multiple processes may all try to copy the same block,
		 * we have to recheck our need to do a copy if we sleep
		 * waiting for the lock.
		 *
		 * Because all snapshots on a filesystem share a single
		 * lock, we ensure that we will never be in competition
		 * with another process to allocate a block.
		 */
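		/*
		 * TDP_COWINPROGRESS guards against the allocation below
		 * recursing into this routine (see the panic at entry),
		 * and TDP_NORUNNINGBUF keeps the writes done on our
		 * behalf from waiting on the runningbufspace that bp was
		 * backed out of above.
		 */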
		td->td_pflags |= TDP_COWINPROGRESS | TDP_NORUNNINGBUF;
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, KERNCRED, 0, &cbp);
		td->td_pflags &= ~TDP_COWINPROGRESS;
		if (error)
			break;
#ifdef DEBUG
		if (snapdebug) {
			printf("Copyonwrite: snapino %d lbn %jd for ",
			    ip->i_number, (intmax_t)lbn);
			if (bp->b_vp == devvp)
				printf("fs metadata");
			else
				printf("inum %d", VTOI(bp->b_vp)->i_number);
			printf(" lblkno %jd to blkno %jd\n",
			    (intmax_t)bp->b_lblkno, (intmax_t)cbp->b_blkno);
		}
#endif
		/*
		 * If we have already read the old block contents, then
		 * simply copy them to the new block. Note that we need
		 * to synchronously write snapshots that have not been
		 * unlinked, and hence will be visible after a crash,
		 * to ensure their integrity.
		 */
		if (savedcbp != NULL) {
			bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize);
			bawrite(cbp);
			if (dopersistence && ip->i_effnlink > 0)
				(void) ffs_syncvnode(vp, MNT_WAIT);
			else
				launched_async_io = 1;
			continue;
		}
		/*
		 * Otherwise, read the old block contents into the buffer.
		 */
		if ((error = readblock(vp, cbp, lbn)) != 0) {
			bzero(cbp->b_data, fs->fs_bsize);
			bawrite(cbp);
			if (dopersistence && ip->i_effnlink > 0)
				(void) ffs_syncvnode(vp, MNT_WAIT);
			else
				launched_async_io = 1;
			break;
		}
		savedcbp = cbp;
	}
	/*
	 * Note that we need to synchronously write snapshots that
	 * have not been unlinked, and hence will be visible after
	 * a crash, to ensure their integrity.
	 */
	if (savedcbp) {
		vp = savedcbp->b_vp;
		bawrite(savedcbp);
		if (dopersistence && VTOI(vp)->i_effnlink > 0)
			(void) ffs_syncvnode(vp, MNT_WAIT);
		else
			launched_async_io = 1;
	}
	lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
	td->td_pflags = (td->td_pflags & ~TDP_NORUNNINGBUF) |
	    prev_norunningbuf;
	if (launched_async_io && (td->td_pflags & TDP_NORUNNINGBUF) == 0)
		waitrunningbufspace();
	/*
	 * I/O on bp will now be started, so count it in runningbufspace.
	 */
	if (saved_runningbufspace != 0) {
		bp->b_runningbufspace = saved_runningbufspace;
		atomic_add_int(&runningbufspace, bp->b_runningbufspace);
	}
	return (error);
}

/*
 * Read the specified block into the given buffer.
 * Much of this boiler-plate comes from bwrite().
 */
static int
readblock(vp, bp, lbn)
	struct vnode *vp;
	struct buf *bp;
	ufs2_daddr_t lbn;
{
	struct inode *ip = VTOI(vp);
	struct bio *bip;

	bip = g_alloc_bio();
	bip->bio_cmd = BIO_READ;
	bip->bio_offset = dbtob(fsbtodb(ip->i_fs, blkstofrags(ip->i_fs, lbn)));
	bip->bio_data = bp->b_data;
	bip->bio_length = bp->b_bcount;
	bip->bio_done = NULL;

	g_io_request(bip, ip->i_devvp->v_bufobj.bo_private);
	bp->b_error = biowait(bip, "snaprdb");
	g_destroy_bio(bip);
	return (bp->b_error);
}
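
/*
 * readblock() above issues the read directly to the GEOM consumer for
 * the underlying device rather than going through the buffer cache.
 * The bio_offset computation converts the filesystem block number to
 * fragments (blkstofrags), then to device blocks (fsbtodb), and
 * finally to a byte offset on the device (dbtob).
 */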

/*
 * Process file deletes that were deferred by ufs_inactive() due to
 * the file system being suspended. Transfer IN_LAZYACCESS into
 * IN_MODIFIED for vnodes that were accessed during suspension.
 */
static void
process_deferred_inactive(struct mount *mp)
{
	struct vnode *vp, *mvp;
	struct inode *ip;
	struct thread *td;
	int error;

	td = curthread;
	(void) vn_start_secondary_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		/*
		 * IN_LAZYACCESS is checked here without holding any
		 * vnode lock, but this flag is set only while holding
		 * vnode interlock.
		 */
		if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED) != 0 ||
		    ((VTOI(vp)->i_flag & IN_LAZYACCESS) == 0 &&
		    ((vp->v_iflag & VI_OWEINACT) == 0 ||
		    vp->v_usecount > 0))) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
		if (error != 0) {
			vdrop(vp);
			MNT_ILOCK(mp);
			if (error == ENOENT)
				continue;	/* vnode recycled */
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			goto loop;
		}
		ip = VTOI(vp);
		if ((ip->i_flag & IN_LAZYACCESS) != 0) {
			ip->i_flag &= ~IN_LAZYACCESS;
			ip->i_flag |= IN_MODIFIED;
		}
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_OWEINACT) == 0 || vp->v_usecount > 0) {
			VI_UNLOCK(vp);
			VOP_UNLOCK(vp, 0);
			vdrop(vp);
			MNT_ILOCK(mp);
			continue;
		}

		VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
		    ("process_deferred_inactive: "
		    "recursed on VI_DOINGINACT"));
		vp->v_iflag |= VI_DOINGINACT;
		vp->v_iflag &= ~VI_OWEINACT;
		VI_UNLOCK(vp);
		(void) VOP_INACTIVE(vp, td);
		VI_LOCK(vp);
		VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
		    ("process_deferred_inactive: lost VI_DOINGINACT"));
		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
		    ("process_deferred_inactive: got VI_OWEINACT"));
		vp->v_iflag &= ~VI_DOINGINACT;
		VI_UNLOCK(vp);
		VOP_UNLOCK(vp, 0);
		vdrop(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_secondary_write(mp);
}

/*
 * Try to free the snapdata associated with devvp. The devvp interlock
 * must be held on entry; it is released before return.
 */
static void
try_free_snapdata(struct vnode *devvp,
    struct thread *td)
{
	struct snapdata *sn;
	ufs2_daddr_t *snapblklist;

	sn = devvp->v_rdev->si_snapdata;

	if (sn == NULL || TAILQ_FIRST(&sn->sn_head) != NULL ||
	    (devvp->v_vflag & VV_COPYONWRITE) == 0) {
		VI_UNLOCK(devvp);
		return;
	}

	devvp->v_rdev->si_snapdata = NULL;
	devvp->v_vflag &= ~VV_COPYONWRITE;
	snapblklist = sn->sn_blklist;
	sn->sn_blklist = NULL;
	sn->sn_listsize = 0;
	lockmgr(&sn->sn_lock, LK_DRAIN | LK_INTERLOCK, VI_MTX(devvp));
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
	lockdestroy(&sn->sn_lock);
	free(sn, M_UFSMNT);
	if (snapblklist != NULL)
		FREE(snapblklist, M_UFSMNT);
}
#endif