/*-
 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * All rights reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_ddb.h"

/*
 * For now we want the safety net that the DEBUG flag provides.
 */
#ifndef DEBUG
#define	DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <geom/geom.h>

#include <ddb/ddb.h>

#define	KTR_SUJ	0 /* Define to KTR_SPARE. */

#ifndef SOFTUPDATES

int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_unmount(mp)
	struct mount *mp;
{

}

void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{
}

void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
	int mode;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
	int frags;
	int oldfrags;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;
	struct ucred *cred;
	off_t length;
	int flags;
{

	panic("softdep_journal_freeblocks called");
}

void
softdep_journal_fsync(ip)
	struct inode *ip;
{

	panic("softdep_journal_fsync called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}

void
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	struct buf *bp;
	struct inode *dp;
	caddr_t base;
	caddr_t oldloc;
	caddr_t newloc;
	int entrysize;
{

	panic("softdep_change_directoryentry_offset called");
called"); 290 } 291 292 void 293 softdep_setup_remove(bp, dp, ip, isrmdir) 294 struct buf *bp; 295 struct inode *dp; 296 struct inode *ip; 297 int isrmdir; 298 { 299 300 panic("softdep_setup_remove called"); 301 } 302 303 void 304 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 305 struct buf *bp; 306 struct inode *dp; 307 struct inode *ip; 308 ino_t newinum; 309 int isrmdir; 310 { 311 312 panic("softdep_setup_directory_change called"); 313 } 314 315 void 316 softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 317 struct mount *mp; 318 struct buf *bp; 319 ufs2_daddr_t blkno; 320 int frags; 321 struct workhead *wkhd; 322 { 323 324 panic("%s called", __FUNCTION__); 325 } 326 327 void 328 softdep_setup_inofree(mp, bp, ino, wkhd) 329 struct mount *mp; 330 struct buf *bp; 331 ino_t ino; 332 struct workhead *wkhd; 333 { 334 335 panic("%s called", __FUNCTION__); 336 } 337 338 void 339 softdep_setup_unlink(dp, ip) 340 struct inode *dp; 341 struct inode *ip; 342 { 343 344 panic("%s called", __FUNCTION__); 345 } 346 347 void 348 softdep_setup_link(dp, ip) 349 struct inode *dp; 350 struct inode *ip; 351 { 352 353 panic("%s called", __FUNCTION__); 354 } 355 356 void 357 softdep_revert_link(dp, ip) 358 struct inode *dp; 359 struct inode *ip; 360 { 361 362 panic("%s called", __FUNCTION__); 363 } 364 365 void 366 softdep_setup_rmdir(dp, ip) 367 struct inode *dp; 368 struct inode *ip; 369 { 370 371 panic("%s called", __FUNCTION__); 372 } 373 374 void 375 softdep_revert_rmdir(dp, ip) 376 struct inode *dp; 377 struct inode *ip; 378 { 379 380 panic("%s called", __FUNCTION__); 381 } 382 383 void 384 softdep_setup_create(dp, ip) 385 struct inode *dp; 386 struct inode *ip; 387 { 388 389 panic("%s called", __FUNCTION__); 390 } 391 392 void 393 softdep_revert_create(dp, ip) 394 struct inode *dp; 395 struct inode *ip; 396 { 397 398 panic("%s called", __FUNCTION__); 399 } 400 401 void 402 softdep_setup_mkdir(dp, ip) 403 struct inode *dp; 404 struct inode *ip; 405 { 406 407 panic("%s called", __FUNCTION__); 408 } 409 410 void 411 softdep_revert_mkdir(dp, ip) 412 struct inode *dp; 413 struct inode *ip; 414 { 415 416 panic("%s called", __FUNCTION__); 417 } 418 419 void 420 softdep_setup_dotdot_link(dp, ip) 421 struct inode *dp; 422 struct inode *ip; 423 { 424 425 panic("%s called", __FUNCTION__); 426 } 427 428 int 429 softdep_prealloc(vp, waitok) 430 struct vnode *vp; 431 int waitok; 432 { 433 434 panic("%s called", __FUNCTION__); 435 436 return (0); 437 } 438 439 int 440 softdep_journal_lookup(mp, vpp) 441 struct mount *mp; 442 struct vnode **vpp; 443 { 444 445 return (ENOENT); 446 } 447 448 void 449 softdep_change_linkcnt(ip) 450 struct inode *ip; 451 { 452 453 panic("softdep_change_linkcnt called"); 454 } 455 456 void 457 softdep_load_inodeblock(ip) 458 struct inode *ip; 459 { 460 461 panic("softdep_load_inodeblock called"); 462 } 463 464 void 465 softdep_update_inodeblock(ip, bp, waitfor) 466 struct inode *ip; 467 struct buf *bp; 468 int waitfor; 469 { 470 471 panic("softdep_update_inodeblock called"); 472 } 473 474 int 475 softdep_fsync(vp) 476 struct vnode *vp; /* the "in_core" copy of the inode */ 477 { 478 479 return (0); 480 } 481 482 void 483 softdep_fsync_mountdev(vp) 484 struct vnode *vp; 485 { 486 487 return; 488 } 489 490 int 491 softdep_flushworklist(oldmnt, countp, td) 492 struct mount *oldmnt; 493 int *countp; 494 struct thread *td; 495 { 496 497 *countp = 0; 498 return (0); 499 } 500 501 int 502 softdep_sync_metadata(struct vnode *vp) 503 { 504 505 return (0); 506 } 507 508 int 509 
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
{

	return (0);
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

void
softdep_releasefile(ip)
	struct inode *ip;	/* inode with the zero effective link count */
{

	panic("softdep_releasefile called");
}

int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_deps,
		      int softdep_accdeps,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_deps;
	(void) softdep_accdeps;

	bo = &devvp->v_bufobj;
	ASSERT_BO_LOCKED(bo);

	MNT_ILOCK(mp);
	while (mp->mnt_secondary_writes != 0) {
		BO_UNLOCK(bo);
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
		BO_LOCK(bo);
		MNT_ILOCK(mp);
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop.
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	BO_UNLOCK(bo);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{

	panic("softdep_buf_append called");
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{

	panic("softdep_inode_append called");
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	panic("softdep_freework called");
}

#else

FEATURE(softupdates, "FFS soft-updates support");
628 */ 629 630 #define M_SOFTDEP_FLAGS (M_WAITOK) 631 632 #define D_PAGEDEP 0 633 #define D_INODEDEP 1 634 #define D_BMSAFEMAP 2 635 #define D_NEWBLK 3 636 #define D_ALLOCDIRECT 4 637 #define D_INDIRDEP 5 638 #define D_ALLOCINDIR 6 639 #define D_FREEFRAG 7 640 #define D_FREEBLKS 8 641 #define D_FREEFILE 9 642 #define D_DIRADD 10 643 #define D_MKDIR 11 644 #define D_DIRREM 12 645 #define D_NEWDIRBLK 13 646 #define D_FREEWORK 14 647 #define D_FREEDEP 15 648 #define D_JADDREF 16 649 #define D_JREMREF 17 650 #define D_JMVREF 18 651 #define D_JNEWBLK 19 652 #define D_JFREEBLK 20 653 #define D_JFREEFRAG 21 654 #define D_JSEG 22 655 #define D_JSEGDEP 23 656 #define D_SBDEP 24 657 #define D_JTRUNC 25 658 #define D_JFSYNC 26 659 #define D_SENTINAL 27 660 #define D_LAST D_SENTINAL 661 662 unsigned long dep_current[D_LAST + 1]; 663 unsigned long dep_total[D_LAST + 1]; 664 unsigned long dep_write[D_LAST + 1]; 665 666 667 static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0, 668 "soft updates stats"); 669 static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0, 670 "total dependencies allocated"); 671 static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0, 672 "current dependencies allocated"); 673 static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0, 674 "current dependencies written"); 675 676 #define SOFTDEP_TYPE(type, str, long) \ 677 static MALLOC_DEFINE(M_ ## type, #str, long); \ 678 SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD, \ 679 &dep_total[D_ ## type], 0, ""); \ 680 SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, \ 681 &dep_current[D_ ## type], 0, ""); \ 682 SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, \ 683 &dep_write[D_ ## type], 0, ""); 684 685 SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies"); 686 SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies"); 687 SOFTDEP_TYPE(BMSAFEMAP, bmsafemap, 688 "Block or frag allocated from cyl group map"); 689 SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency"); 690 SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode"); 691 SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies"); 692 SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block"); 693 SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode"); 694 SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode"); 695 SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated"); 696 SOFTDEP_TYPE(DIRADD, diradd, "New directory entry"); 697 SOFTDEP_TYPE(MKDIR, mkdir, "New directory"); 698 SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted"); 699 SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block"); 700 SOFTDEP_TYPE(FREEWORK, freework, "free an inode block"); 701 SOFTDEP_TYPE(FREEDEP, freedep, "track a block free"); 702 SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add"); 703 SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove"); 704 SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move"); 705 SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block"); 706 SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block"); 707 SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag"); 708 SOFTDEP_TYPE(JSEG, jseg, "Journal segment"); 709 SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete"); 710 SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency"); 711 SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation"); 712 SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete"); 713 714 static MALLOC_DEFINE(M_SAVEDINO, 
"savedino", "Saved inodes"); 715 static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations"); 716 717 /* 718 * translate from workitem type to memory type 719 * MUST match the defines above, such that memtype[D_XXX] == M_XXX 720 */ 721 static struct malloc_type *memtype[] = { 722 M_PAGEDEP, 723 M_INODEDEP, 724 M_BMSAFEMAP, 725 M_NEWBLK, 726 M_ALLOCDIRECT, 727 M_INDIRDEP, 728 M_ALLOCINDIR, 729 M_FREEFRAG, 730 M_FREEBLKS, 731 M_FREEFILE, 732 M_DIRADD, 733 M_MKDIR, 734 M_DIRREM, 735 M_NEWDIRBLK, 736 M_FREEWORK, 737 M_FREEDEP, 738 M_JADDREF, 739 M_JREMREF, 740 M_JMVREF, 741 M_JNEWBLK, 742 M_JFREEBLK, 743 M_JFREEFRAG, 744 M_JSEG, 745 M_JSEGDEP, 746 M_SBDEP, 747 M_JTRUNC, 748 M_JFSYNC 749 }; 750 751 static LIST_HEAD(mkdirlist, mkdir) mkdirlisthd; 752 753 #define DtoM(type) (memtype[type]) 754 755 /* 756 * Names of malloc types. 757 */ 758 #define TYPENAME(type) \ 759 ((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???") 760 /* 761 * End system adaptation definitions. 762 */ 763 764 #define DOTDOT_OFFSET offsetof(struct dirtemplate, dotdot_ino) 765 #define DOT_OFFSET offsetof(struct dirtemplate, dot_ino) 766 767 /* 768 * Forward declarations. 769 */ 770 struct inodedep_hashhead; 771 struct newblk_hashhead; 772 struct pagedep_hashhead; 773 struct bmsafemap_hashhead; 774 775 /* 776 * Private journaling structures. 777 */ 778 struct jblocks { 779 struct jseglst jb_segs; /* TAILQ of current segments. */ 780 struct jseg *jb_writeseg; /* Next write to complete. */ 781 struct jseg *jb_oldestseg; /* Oldest segment with valid entries. */ 782 struct jextent *jb_extent; /* Extent array. */ 783 uint64_t jb_nextseq; /* Next sequence number. */ 784 uint64_t jb_oldestwrseq; /* Oldest written sequence number. */ 785 uint8_t jb_needseg; /* Need a forced segment. */ 786 uint8_t jb_suspended; /* Did journal suspend writes? */ 787 int jb_avail; /* Available extents. */ 788 int jb_used; /* Last used extent. */ 789 int jb_head; /* Allocator head. */ 790 int jb_off; /* Allocator extent offset. */ 791 int jb_blocks; /* Total disk blocks covered. */ 792 int jb_free; /* Total disk blocks free. */ 793 int jb_min; /* Minimum free space. */ 794 int jb_low; /* Low on space. */ 795 int jb_age; /* Insertion time of oldest rec. */ 796 }; 797 798 struct jextent { 799 ufs2_daddr_t je_daddr; /* Disk block address. */ 800 int je_blocks; /* Disk block count. */ 801 }; 802 803 /* 804 * Internal function prototypes. 
805 */ 806 static void softdep_error(char *, int); 807 static void drain_output(struct vnode *); 808 static struct buf *getdirtybuf(struct buf *, struct mtx *, int); 809 static void clear_remove(void); 810 static void clear_inodedeps(void); 811 static void unlinked_inodedep(struct mount *, struct inodedep *); 812 static void clear_unlinked_inodedep(struct inodedep *); 813 static struct inodedep *first_unlinked_inodedep(struct ufsmount *); 814 static int flush_pagedep_deps(struct vnode *, struct mount *, 815 struct diraddhd *); 816 static int free_pagedep(struct pagedep *); 817 static int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t); 818 static int flush_inodedep_deps(struct vnode *, struct mount *, ino_t); 819 static int flush_deplist(struct allocdirectlst *, int, int *); 820 static int sync_cgs(struct mount *, int); 821 static int handle_written_filepage(struct pagedep *, struct buf *); 822 static int handle_written_sbdep(struct sbdep *, struct buf *); 823 static void initiate_write_sbdep(struct sbdep *); 824 static void diradd_inode_written(struct diradd *, struct inodedep *); 825 static int handle_written_indirdep(struct indirdep *, struct buf *, 826 struct buf**); 827 static int handle_written_inodeblock(struct inodedep *, struct buf *); 828 static int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *, 829 uint8_t *); 830 static int handle_written_bmsafemap(struct bmsafemap *, struct buf *); 831 static void handle_written_jaddref(struct jaddref *); 832 static void handle_written_jremref(struct jremref *); 833 static void handle_written_jseg(struct jseg *, struct buf *); 834 static void handle_written_jnewblk(struct jnewblk *); 835 static void handle_written_jblkdep(struct jblkdep *); 836 static void handle_written_jfreefrag(struct jfreefrag *); 837 static void complete_jseg(struct jseg *); 838 static void complete_jsegs(struct jseg *); 839 static void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *); 840 static void jaddref_write(struct jaddref *, struct jseg *, uint8_t *); 841 static void jremref_write(struct jremref *, struct jseg *, uint8_t *); 842 static void jmvref_write(struct jmvref *, struct jseg *, uint8_t *); 843 static void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *); 844 static void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data); 845 static void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *); 846 static void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *); 847 static void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *); 848 static inline void inoref_write(struct inoref *, struct jseg *, 849 struct jrefrec *); 850 static void handle_allocdirect_partdone(struct allocdirect *, 851 struct workhead *); 852 static struct jnewblk *cancel_newblk(struct newblk *, struct worklist *, 853 struct workhead *); 854 static void indirdep_complete(struct indirdep *); 855 static int indirblk_lookup(struct mount *, ufs2_daddr_t); 856 static void indirblk_insert(struct freework *); 857 static void indirblk_remove(struct freework *); 858 static void handle_allocindir_partdone(struct allocindir *); 859 static void initiate_write_filepage(struct pagedep *, struct buf *); 860 static void initiate_write_indirdep(struct indirdep*, struct buf *); 861 static void handle_written_mkdir(struct mkdir *, int); 862 static int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *, 863 uint8_t *); 864 static void initiate_write_bmsafemap(struct bmsafemap *, struct buf *); 865 static void 
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	int handle_workitem_remove(struct dirrem *, int);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
	    struct buf *);
static	void cancel_indirdep(struct indirdep *, struct buf *,
	    struct freeblks *);
static	void free_indirdep(struct indirdep *);
static	void free_diradd(struct diradd *, struct workhead *);
static	void merge_diradd(struct inodedep *, struct diradd *);
static	void complete_diradd(struct diradd *);
static	struct diradd *diradd_lookup(struct pagedep *, int);
static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
	    struct jremref *);
static	void cancel_allocindir(struct allocindir *, struct buf *bp,
	    struct freeblks *, int);
static	int setup_trunc_indir(struct freeblks *, struct inode *,
	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static	void complete_trunc_indir(struct freework *);
static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
	    int);
static	void complete_mkdir(struct mkdir *);
static	void free_newdirblk(struct newdirblk *);
static	void free_jremref(struct jremref *);
static	void free_jaddref(struct jaddref *);
static	void free_jsegdep(struct jsegdep *);
static	void free_jsegs(struct jblocks *);
static	void rele_jseg(struct jseg *);
static	void free_jseg(struct jseg *, struct jblocks *);
static	void free_jnewblk(struct jnewblk *);
static	void free_jblkdep(struct jblkdep *);
static	void free_jfreefrag(struct jfreefrag *);
static	void free_freedep(struct freedep *);
static	void journal_jremref(struct dirrem *, struct jremref *,
	    struct inodedep *);
static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
static	int cancel_jaddref(struct jaddref *, struct inodedep *,
	    struct workhead *);
static	void cancel_jfreefrag(struct jfreefrag *);
static	inline void setup_freedirect(struct freeblks *, struct inode *,
	    int, int);
static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
	    ufs_lbn_t, int);
static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
	    int, int);
static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
static	void newblk_freefrag(struct newblk*);
static	void free_newblk(struct newblk *);
static	void cancel_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, struct freeblks *);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void freework_freeblock(struct freework *);
static	void freework_enqueue(struct freework *);
static	int handle_workitem_freeblocks(struct freeblks *, int);
static	int handle_complete_freeblocks(struct freeblks *, int);
static	void handle_workitem_indirblk(struct freework *);
static	void handle_written_freework(struct freework *);
static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
	    struct workhead *);
static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	    struct inodedep *, struct allocindir *, ufs_lbn_t);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t, ufs_lbn_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
	    ufs_lbn_t);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct freefrag *allocindir_merge(struct allocindir *,
	    struct allocindir *);
static	int bmsafemap_find(struct bmsafemap_hashhead *, struct mount *, int,
	    struct bmsafemap **);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	    int cg, struct bmsafemap *);
static	int newblk_find(struct newblk_hashhead *, struct mount *, ufs2_daddr_t,
	    int, struct newblk **);
static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, struct fs *, ino_t,
	    struct inodedep **);
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	    int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct mount *mp, int, struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	int process_worklist_item(struct mount *, int, int);
static	void process_removes(struct vnode *);
static	void process_truncates(struct vnode *);
static	void jwork_move(struct workhead *, struct workhead *);
static	void jwork_insert(struct workhead *, struct jsegdep *);
static	void add_to_worklist(struct worklist *, int);
static	void wake_worklist(struct worklist *);
static	void wait_worklist(struct worklist *, char *);
static	void remove_from_worklist(struct worklist *);
static	void softdep_flush(void);
static	void softdep_flushjournal(struct mount *);
static	int softdep_speedup(void);
static	void worklist_speedup(void);
static	int journal_mount(struct mount *, struct fs *, struct ucred *);
static	void journal_unmount(struct mount *);
static	int journal_space(struct ufsmount *, int);
static	void journal_suspend(struct ufsmount *);
static	int journal_unsuspend(struct ufsmount *ump);
static	void softdep_prelink(struct vnode *, struct vnode *);
static	void add_to_journal(struct worklist *);
static	void remove_from_journal(struct worklist *);
static	void softdep_process_journal(struct mount *, struct worklist *, int);
static	struct jremref *newjremref(struct dirrem *, struct inode *,
	    struct inode *ip, off_t, nlink_t);
static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
	    uint16_t);
static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
	    uint16_t);
static	inline struct jsegdep *inoref_jseg(struct inoref *);
static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
	    ufs2_daddr_t, int);
static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static	void move_newblock_dep(struct jaddref *, struct inodedep *);
static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	    ufs2_daddr_t, long, ufs_lbn_t);
static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static	int jwait(struct worklist *, int);
static	struct inodedep *inodedep_lookup_ip(struct inode *);
static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static	void handle_jwork(struct workhead *);
static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
	    struct mkdir **);
static	struct jblocks *jblocks_create(void);
static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static	void jblocks_free(struct jblocks *, struct mount *, int);
static	void jblocks_destroy(struct jblocks *);
static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

static struct mtx lk;
MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);

#define	TRY_ACQUIRE_LOCK(lk)		mtx_trylock(lk)
#define	ACQUIRE_LOCK(lk)		mtx_lock(lk)
#define	FREE_LOCK(lk)			mtx_unlock(lk)

#define	BUF_AREC(bp)			lockallowrecurse(&(bp)->b_lock)
#define	BUF_NOREC(bp)			lockdisablerecurse(&(bp)->b_lock)
1043 */ 1044 #ifndef /* NOT */ DEBUG 1045 #define WORKLIST_INSERT(head, item) do { \ 1046 (item)->wk_state |= ONWORKLIST; \ 1047 LIST_INSERT_HEAD(head, item, wk_list); \ 1048 } while (0) 1049 #define WORKLIST_REMOVE(item) do { \ 1050 (item)->wk_state &= ~ONWORKLIST; \ 1051 LIST_REMOVE(item, wk_list); \ 1052 } while (0) 1053 #define WORKLIST_INSERT_UNLOCKED WORKLIST_INSERT 1054 #define WORKLIST_REMOVE_UNLOCKED WORKLIST_REMOVE 1055 1056 #else /* DEBUG */ 1057 static void worklist_insert(struct workhead *, struct worklist *, int); 1058 static void worklist_remove(struct worklist *, int); 1059 1060 #define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1) 1061 #define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0) 1062 #define WORKLIST_REMOVE(item) worklist_remove(item, 1) 1063 #define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0) 1064 1065 static void 1066 worklist_insert(head, item, locked) 1067 struct workhead *head; 1068 struct worklist *item; 1069 int locked; 1070 { 1071 1072 if (locked) 1073 mtx_assert(&lk, MA_OWNED); 1074 if (item->wk_state & ONWORKLIST) 1075 panic("worklist_insert: %p %s(0x%X) already on list", 1076 item, TYPENAME(item->wk_type), item->wk_state); 1077 item->wk_state |= ONWORKLIST; 1078 LIST_INSERT_HEAD(head, item, wk_list); 1079 } 1080 1081 static void 1082 worklist_remove(item, locked) 1083 struct worklist *item; 1084 int locked; 1085 { 1086 1087 if (locked) 1088 mtx_assert(&lk, MA_OWNED); 1089 if ((item->wk_state & ONWORKLIST) == 0) 1090 panic("worklist_remove: %p %s(0x%X) not on list", 1091 item, TYPENAME(item->wk_type), item->wk_state); 1092 item->wk_state &= ~ONWORKLIST; 1093 LIST_REMOVE(item, wk_list); 1094 } 1095 #endif /* DEBUG */ 1096 1097 /* 1098 * Merge two jsegdeps keeping only the oldest one as newer references 1099 * can't be discarded until after older references. 1100 */ 1101 static inline struct jsegdep * 1102 jsegdep_merge(struct jsegdep *one, struct jsegdep *two) 1103 { 1104 struct jsegdep *swp; 1105 1106 if (two == NULL) 1107 return (one); 1108 1109 if (one->jd_seg->js_seq > two->jd_seg->js_seq) { 1110 swp = one; 1111 one = two; 1112 two = swp; 1113 } 1114 WORKLIST_REMOVE(&two->jd_list); 1115 free_jsegdep(two); 1116 1117 return (one); 1118 } 1119 1120 /* 1121 * If two freedeps are compatible free one to reduce list size. 1122 */ 1123 static inline struct freedep * 1124 freedep_merge(struct freedep *one, struct freedep *two) 1125 { 1126 if (two == NULL) 1127 return (one); 1128 1129 if (one->fd_freework == two->fd_freework) { 1130 WORKLIST_REMOVE(&two->fd_list); 1131 free_freedep(two); 1132 } 1133 return (one); 1134 } 1135 1136 /* 1137 * Move journal work from one list to another. Duplicate freedeps and 1138 * jsegdeps are coalesced to keep the lists as small as possible. 
1139 */ 1140 static void 1141 jwork_move(dst, src) 1142 struct workhead *dst; 1143 struct workhead *src; 1144 { 1145 struct freedep *freedep; 1146 struct jsegdep *jsegdep; 1147 struct worklist *wkn; 1148 struct worklist *wk; 1149 1150 KASSERT(dst != src, 1151 ("jwork_move: dst == src")); 1152 freedep = NULL; 1153 jsegdep = NULL; 1154 LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) { 1155 if (wk->wk_type == D_JSEGDEP) 1156 jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep); 1157 if (wk->wk_type == D_FREEDEP) 1158 freedep = freedep_merge(WK_FREEDEP(wk), freedep); 1159 } 1160 1161 mtx_assert(&lk, MA_OWNED); 1162 while ((wk = LIST_FIRST(src)) != NULL) { 1163 WORKLIST_REMOVE(wk); 1164 WORKLIST_INSERT(dst, wk); 1165 if (wk->wk_type == D_JSEGDEP) { 1166 jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep); 1167 continue; 1168 } 1169 if (wk->wk_type == D_FREEDEP) 1170 freedep = freedep_merge(WK_FREEDEP(wk), freedep); 1171 } 1172 } 1173 1174 static void 1175 jwork_insert(dst, jsegdep) 1176 struct workhead *dst; 1177 struct jsegdep *jsegdep; 1178 { 1179 struct jsegdep *jsegdepn; 1180 struct worklist *wk; 1181 1182 LIST_FOREACH(wk, dst, wk_list) 1183 if (wk->wk_type == D_JSEGDEP) 1184 break; 1185 if (wk == NULL) { 1186 WORKLIST_INSERT(dst, &jsegdep->jd_list); 1187 return; 1188 } 1189 jsegdepn = WK_JSEGDEP(wk); 1190 if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) { 1191 WORKLIST_REMOVE(wk); 1192 free_jsegdep(jsegdepn); 1193 WORKLIST_INSERT(dst, &jsegdep->jd_list); 1194 } else 1195 free_jsegdep(jsegdep); 1196 } 1197 1198 /* 1199 * Routines for tracking and managing workitems. 1200 */ 1201 static void workitem_free(struct worklist *, int); 1202 static void workitem_alloc(struct worklist *, int, struct mount *); 1203 1204 #define WORKITEM_FREE(item, type) workitem_free((struct worklist *)(item), (type)) 1205 1206 static void 1207 workitem_free(item, type) 1208 struct worklist *item; 1209 int type; 1210 { 1211 struct ufsmount *ump; 1212 mtx_assert(&lk, MA_OWNED); 1213 1214 #ifdef DEBUG 1215 if (item->wk_state & ONWORKLIST) 1216 panic("workitem_free: %s(0x%X) still on list", 1217 TYPENAME(item->wk_type), item->wk_state); 1218 if (item->wk_type != type) 1219 panic("workitem_free: type mismatch %s != %s", 1220 TYPENAME(item->wk_type), TYPENAME(type)); 1221 #endif 1222 if (item->wk_state & IOWAITING) 1223 wakeup(item); 1224 ump = VFSTOUFS(item->wk_mp); 1225 if (--ump->softdep_deps == 0 && ump->softdep_req) 1226 wakeup(&ump->softdep_deps); 1227 dep_current[type]--; 1228 free(item, DtoM(type)); 1229 } 1230 1231 static void 1232 workitem_alloc(item, type, mp) 1233 struct worklist *item; 1234 int type; 1235 struct mount *mp; 1236 { 1237 struct ufsmount *ump; 1238 1239 item->wk_type = type; 1240 item->wk_mp = mp; 1241 item->wk_state = 0; 1242 1243 ump = VFSTOUFS(mp); 1244 ACQUIRE_LOCK(&lk); 1245 dep_current[type]++; 1246 dep_total[type]++; 1247 ump->softdep_deps++; 1248 ump->softdep_accdeps++; 1249 FREE_LOCK(&lk); 1250 } 1251 1252 /* 1253 * Workitem queue management 1254 */ 1255 static int max_softdeps; /* maximum number of structs before slowdown */ 1256 static int maxindirdeps = 50; /* max number of indirdeps before slowdown */ 1257 static int tickdelay = 2; /* number of ticks to pause during slowdown */ 1258 static int proc_waiting; /* tracks whether we have a timeout posted */ 1259 static int *stat_countp; /* statistic to count in proc_waiting timeout */ 1260 static struct callout softdep_callout; 1261 static int req_pending; 1262 static int req_clear_inodedeps; /* syncer process flush some inodedeps */ 1263 static 

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int maxindirdeps = 50;	/* max number of indirdeps before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static int req_pending;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */

/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */

SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
    &tickdelay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, maxindirdeps, CTLFLAG_RW,
    &maxindirdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
    &stat_worklist_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
    &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
    &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
    &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
    &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
    &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
    &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
    &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
    &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
    &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
    &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
    &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
    &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
    &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
    &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
    &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
    &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
    &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
    &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
    &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
    &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
    &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
    &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
    &stat_cleanup_failures, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");

SYSCTL_DECL(_vfs_ffs);

LIST_HEAD(bmsafemap_hashhead, bmsafemap) *bmsafemap_hashtbl;
static u_long	bmsafemap_hash;	/* size of hash table - 1 */

static int compute_summary_at_mount = 0; /* Whether to recompute the summary at mount time */
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
    &compute_summary_at_mount, 0, "Recompute summary at mount");

static struct proc *softdepproc;
static struct kproc_desc softdep_kp = {
	"softdepflush",
	softdep_flush,
	&softdepproc
};
SYSINIT(sdproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
    &softdep_kp);

static void
softdep_flush(void)
{
	struct mount *nmp;
	struct mount *mp;
	struct ufsmount *ump;
	struct thread *td;
	int remaining;
	int progress;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		kproc_suspend_check(softdepproc);
		ACQUIRE_LOCK(&lk);
1387 */ 1388 if (req_clear_inodedeps) { 1389 clear_inodedeps(); 1390 req_clear_inodedeps -= 1; 1391 wakeup_one(&proc_waiting); 1392 } 1393 if (req_clear_remove) { 1394 clear_remove(); 1395 req_clear_remove -= 1; 1396 wakeup_one(&proc_waiting); 1397 } 1398 FREE_LOCK(&lk); 1399 remaining = progress = 0; 1400 mtx_lock(&mountlist_mtx); 1401 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1402 nmp = TAILQ_NEXT(mp, mnt_list); 1403 if (MOUNTEDSOFTDEP(mp) == 0) 1404 continue; 1405 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 1406 continue; 1407 progress += softdep_process_worklist(mp, 0); 1408 ump = VFSTOUFS(mp); 1409 remaining += ump->softdep_on_worklist; 1410 mtx_lock(&mountlist_mtx); 1411 nmp = TAILQ_NEXT(mp, mnt_list); 1412 vfs_unbusy(mp); 1413 } 1414 mtx_unlock(&mountlist_mtx); 1415 if (remaining && progress) 1416 continue; 1417 ACQUIRE_LOCK(&lk); 1418 if (!req_pending) 1419 msleep(&req_pending, &lk, PVM, "sdflush", hz); 1420 req_pending = 0; 1421 FREE_LOCK(&lk); 1422 } 1423 } 1424 1425 static void 1426 worklist_speedup(void) 1427 { 1428 mtx_assert(&lk, MA_OWNED); 1429 if (req_pending == 0) { 1430 req_pending = 1; 1431 wakeup(&req_pending); 1432 } 1433 } 1434 1435 static int 1436 softdep_speedup(void) 1437 { 1438 1439 worklist_speedup(); 1440 bd_speedup(); 1441 return speedup_syncer(); 1442 } 1443 1444 /* 1445 * Add an item to the end of the work queue. 1446 * This routine requires that the lock be held. 1447 * This is the only routine that adds items to the list. 1448 * The following routine is the only one that removes items 1449 * and does so in order from first to last. 1450 */ 1451 1452 #define WK_HEAD 0x0001 /* Add to HEAD. */ 1453 #define WK_NODELAY 0x0002 /* Process immediately. */ 1454 1455 static void 1456 add_to_worklist(wk, flags) 1457 struct worklist *wk; 1458 int flags; 1459 { 1460 struct ufsmount *ump; 1461 1462 mtx_assert(&lk, MA_OWNED); 1463 ump = VFSTOUFS(wk->wk_mp); 1464 if (wk->wk_state & ONWORKLIST) 1465 panic("add_to_worklist: %s(0x%X) already on list", 1466 TYPENAME(wk->wk_type), wk->wk_state); 1467 wk->wk_state |= ONWORKLIST; 1468 if (ump->softdep_on_worklist == 0) { 1469 LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list); 1470 ump->softdep_worklist_tail = wk; 1471 } else if (flags & WK_HEAD) { 1472 LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list); 1473 } else { 1474 LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list); 1475 ump->softdep_worklist_tail = wk; 1476 } 1477 ump->softdep_on_worklist += 1; 1478 if (flags & WK_NODELAY) 1479 worklist_speedup(); 1480 } 1481 1482 /* 1483 * Remove the item to be processed. If we are removing the last 1484 * item on the list, we need to recalculate the tail pointer. 1485 */ 1486 static void 1487 remove_from_worklist(wk) 1488 struct worklist *wk; 1489 { 1490 struct ufsmount *ump; 1491 1492 ump = VFSTOUFS(wk->wk_mp); 1493 WORKLIST_REMOVE(wk); 1494 if (ump->softdep_worklist_tail == wk) 1495 ump->softdep_worklist_tail = 1496 (struct worklist *)wk->wk_list.le_prev; 1497 ump->softdep_on_worklist -= 1; 1498 } 1499 1500 static void 1501 wake_worklist(wk) 1502 struct worklist *wk; 1503 { 1504 if (wk->wk_state & IOWAITING) { 1505 wk->wk_state &= ~IOWAITING; 1506 wakeup(wk); 1507 } 1508 } 1509 1510 static void 1511 wait_worklist(wk, wmesg) 1512 struct worklist *wk; 1513 char *wmesg; 1514 { 1515 1516 wk->wk_state |= IOWAITING; 1517 msleep(wk, &lk, PVM, wmesg, 0); 1518 } 1519 1520 /* 1521 * Process that runs once per second to handle items in the background queue. 
1522 * 1523 * Note that we ensure that everything is done in the order in which they 1524 * appear in the queue. The code below depends on this property to ensure 1525 * that blocks of a file are freed before the inode itself is freed. This 1526 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated 1527 * until all the old ones have been purged from the dependency lists. 1528 */ 1529 int 1530 softdep_process_worklist(mp, full) 1531 struct mount *mp; 1532 int full; 1533 { 1534 int cnt, matchcnt; 1535 struct ufsmount *ump; 1536 long starttime; 1537 1538 KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp")); 1539 /* 1540 * Record the process identifier of our caller so that we can give 1541 * this process preferential treatment in request_cleanup below. 1542 */ 1543 matchcnt = 0; 1544 ump = VFSTOUFS(mp); 1545 ACQUIRE_LOCK(&lk); 1546 starttime = time_second; 1547 softdep_process_journal(mp, NULL, full?MNT_WAIT:0); 1548 while (ump->softdep_on_worklist > 0) { 1549 if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0) 1550 break; 1551 else 1552 matchcnt += cnt; 1553 /* 1554 * If requested, try removing inode or removal dependencies. 1555 */ 1556 if (req_clear_inodedeps) { 1557 clear_inodedeps(); 1558 req_clear_inodedeps -= 1; 1559 wakeup_one(&proc_waiting); 1560 } 1561 if (req_clear_remove) { 1562 clear_remove(); 1563 req_clear_remove -= 1; 1564 wakeup_one(&proc_waiting); 1565 } 1566 /* 1567 * We do not generally want to stop for buffer space, but if 1568 * we are really being a buffer hog, we will stop and wait. 1569 */ 1570 if (should_yield()) { 1571 FREE_LOCK(&lk); 1572 kern_yield(PRI_USER); 1573 bwillwrite(); 1574 ACQUIRE_LOCK(&lk); 1575 } 1576 /* 1577 * Never allow processing to run for more than one 1578 * second. Otherwise the other mountpoints may get 1579 * excessively backlogged. 1580 */ 1581 if (!full && starttime != time_second) 1582 break; 1583 } 1584 if (full == 0) 1585 journal_unsuspend(ump); 1586 FREE_LOCK(&lk); 1587 return (matchcnt); 1588 } 1589 1590 /* 1591 * Process all removes associated with a vnode if we are running out of 1592 * journal space. Any other process which attempts to flush these will 1593 * be unable as we have the vnodes locked. 1594 */ 1595 static void 1596 process_removes(vp) 1597 struct vnode *vp; 1598 { 1599 struct inodedep *inodedep; 1600 struct dirrem *dirrem; 1601 struct mount *mp; 1602 ino_t inum; 1603 1604 mtx_assert(&lk, MA_OWNED); 1605 1606 mp = vp->v_mount; 1607 inum = VTOI(vp)->i_number; 1608 for (;;) { 1609 top: 1610 if (inodedep_lookup(mp, inum, 0, &inodedep) == 0) 1611 return; 1612 LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) { 1613 /* 1614 * If another thread is trying to lock this vnode 1615 * it will fail but we must wait for it to do so 1616 * before we can proceed. 1617 */ 1618 if (dirrem->dm_state & INPROGRESS) { 1619 wait_worklist(&dirrem->dm_list, "pwrwait"); 1620 goto top; 1621 } 1622 if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) == 1623 (COMPLETE | ONWORKLIST)) 1624 break; 1625 } 1626 if (dirrem == NULL) 1627 return; 1628 remove_from_worklist(&dirrem->dm_list); 1629 FREE_LOCK(&lk); 1630 if (vn_start_secondary_write(NULL, &mp, V_NOWAIT)) 1631 panic("process_removes: suspended filesystem"); 1632 handle_workitem_remove(dirrem, 0); 1633 vn_finished_secondary_write(mp); 1634 ACQUIRE_LOCK(&lk); 1635 } 1636 } 1637 1638 /* 1639 * Process all truncations associated with a vnode if we are running out 1640 * of journal space. 
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	mtx_assert(&lk, MA_OWNED);

	mp = vp->v_mount;
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
				FREE_LOCK(&lk);
				ffs_update(vp, 1);
				ACQUIRE_LOCK(&lk);
				break;
			}
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				FREE_LOCK(&lk);
				if (vn_start_secondary_write(NULL, &mp,
				    V_NOWAIT))
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
				ACQUIRE_LOCK(&lk);
				break;
			}
			if (freeblks->fb_cgwait)
				cgwait++;
		}
		if (cgwait) {
			FREE_LOCK(&lk);
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
			ACQUIRE_LOCK(&lk);
			continue;
		}
		if (freeblks == NULL)
			break;
	}
	return;
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, target, flags)
	struct mount *mp;
	int target;
	int flags;
{
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	int matchcnt;
	int error;

	mtx_assert(&lk, MA_OWNED);
	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
	PHOLD(curproc);	/* Don't let the stack go away. */
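	/*
	 * Editorial note: the on-stack sentinel below marks our position in
	 * the pending list.  The softdep lock is dropped while an item is
	 * processed, so neighboring items may come and go; advancing the
	 * sentinel past each item keeps the scan position valid.
	 */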
	ump = VFSTOUFS(mp);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
			    wk);
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		FREE_LOCK(&lk);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
		case D_DIRREM:
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			break;

		case D_FREEBLKS:
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			    flags);
			break;

		case D_FREEFRAG:
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			error = 0;
			break;

		case D_FREEFILE:
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			error = 0;
			break;

		default:
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(&lk);
		if (error == 0) {
			if (++matchcnt == target)
				break;
			continue;
		}
		/*
		 * We have to retry the worklist item later.  Wake up any
		 * waiters who may be able to complete it immediately and
		 * add the item back to the head so we don't try to execute
		 * it again.
		 */
		wk->wk_state &= ~INPROGRESS;
		wake_worklist(wk);
		add_to_worklist(wk, WK_HEAD);
	}
	LIST_REMOVE(&sentinel, wk_list);
	/* Sentinel could've become the tail from remove_from_worklist. */
	if (ump->softdep_worklist_tail == &sentinel)
		ump->softdep_worklist_tail =
		    (struct worklist *)sentinel.wk_list.le_prev;
	PRELE(curproc);
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
int
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;
	int dirty;

	dirty = 0;
	wktail = NULL;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wk->wk_type == D_BMSAFEMAP &&
		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
			dirty = 1;
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);

	return (dirty);
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	int count, error = 0;
	struct ufsmount *ump;

	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates.  We continue until no more worklist dependencies
	 * are found.
	 */
1860 */ 1861 *countp = 0; 1862 ump = VFSTOUFS(oldmnt); 1863 devvp = ump->um_devvp; 1864 while ((count = softdep_process_worklist(oldmnt, 1)) > 0) { 1865 *countp += count; 1866 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1867 error = VOP_FSYNC(devvp, MNT_WAIT, td); 1868 VOP_UNLOCK(devvp, 0); 1869 if (error) 1870 break; 1871 } 1872 return (error); 1873 } 1874 1875 int 1876 softdep_waitidle(struct mount *mp) 1877 { 1878 struct ufsmount *ump; 1879 int error; 1880 int i; 1881 1882 ump = VFSTOUFS(mp); 1883 ACQUIRE_LOCK(&lk); 1884 for (i = 0; i < 10 && ump->softdep_deps; i++) { 1885 ump->softdep_req = 1; 1886 if (ump->softdep_on_worklist) 1887 panic("softdep_waitidle: work added after flush."); 1888 msleep(&ump->softdep_deps, &lk, PVM, "softdeps", 1); 1889 } 1890 ump->softdep_req = 0; 1891 FREE_LOCK(&lk); 1892 error = 0; 1893 if (i == 10) { 1894 error = EBUSY; 1895 printf("softdep_waitidle: Failed to flush worklist for %p\n", 1896 mp); 1897 } 1898 1899 return (error); 1900 } 1901 1902 /* 1903 * Flush all vnodes and worklist items associated with a specified mount point. 1904 */ 1905 int 1906 softdep_flushfiles(oldmnt, flags, td) 1907 struct mount *oldmnt; 1908 int flags; 1909 struct thread *td; 1910 { 1911 int error, depcount, loopcnt, retry_flush_count, retry; 1912 1913 loopcnt = 10; 1914 retry_flush_count = 3; 1915 retry_flush: 1916 error = 0; 1917 1918 /* 1919 * Alternately flush the vnodes associated with the mount 1920 * point and process any dependencies that the flushing 1921 * creates. In theory, this loop can happen at most twice, 1922 * but we give it a few extra just to be sure. 1923 */ 1924 for (; loopcnt > 0; loopcnt--) { 1925 /* 1926 * Do another flush in case any vnodes were brought in 1927 * as part of the cleanup operations. 1928 */ 1929 if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0) 1930 break; 1931 if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 || 1932 depcount == 0) 1933 break; 1934 } 1935 /* 1936 * If we are unmounting then it is an error to fail. If we 1937 * are simply trying to downgrade to read-only, then filesystem 1938 * activity can keep us busy forever, so we just fail with EBUSY. 1939 */ 1940 if (loopcnt == 0) { 1941 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 1942 panic("softdep_flushfiles: looping"); 1943 error = EBUSY; 1944 } 1945 if (!error) 1946 error = softdep_waitidle(oldmnt); 1947 if (!error) { 1948 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) { 1949 retry = 0; 1950 MNT_ILOCK(oldmnt); 1951 KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0, 1952 ("softdep_flushfiles: !MNTK_NOINSMNTQ")); 1953 if (oldmnt->mnt_nvnodelistsize > 0) { 1954 if (--retry_flush_count > 0) { 1955 retry = 1; 1956 loopcnt = 3; 1957 } else 1958 error = EBUSY; 1959 } 1960 MNT_IUNLOCK(oldmnt); 1961 if (retry) 1962 goto retry_flush; 1963 } 1964 } 1965 return (error); 1966 } 1967 1968 /* 1969 * Structure hashing. 1970 * 1971 * There are three types of structures that can be looked up: 1972 * 1) pagedep structures identified by mount point, inode number, 1973 * and logical block. 1974 * 2) inodedep structures identified by mount point and inode number. 1975 * 3) newblk structures identified by mount point and 1976 * physical block number. 1977 * 1978 * The "pagedep" and "inodedep" dependency structures are hashed 1979 * separately from the file blocks and inodes to which they correspond. 1980 * This separation helps when the in-memory copy of an inode or 1981 * file block must be replaced. 
It also obviates the need to access 1982 * an inode or file page when simply updating (or de-allocating) 1983 * dependency structures. Lookup of newblk structures is needed to 1984 * find newly allocated blocks when trying to associate them with 1985 * their allocdirect or allocindir structure. 1986 * 1987 * The lookup routines optionally create and hash a new instance when 1988 * an existing entry is not found. 1989 */ 1990 #define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 1991 #define NODELAY 0x0002 /* cannot do background work */ 1992 1993 /* 1994 * Structures and routines associated with pagedep caching. 1995 */ 1996 LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl; 1997 u_long pagedep_hash; /* size of hash table - 1 */ 1998 #define PAGEDEP_HASH(mp, inum, lbn) \ 1999 (&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \ 2000 pagedep_hash]) 2001 2002 static int 2003 pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp) 2004 struct pagedep_hashhead *pagedephd; 2005 ino_t ino; 2006 ufs_lbn_t lbn; 2007 struct mount *mp; 2008 int flags; 2009 struct pagedep **pagedeppp; 2010 { 2011 struct pagedep *pagedep; 2012 2013 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 2014 if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn && 2015 mp == pagedep->pd_list.wk_mp) { 2016 *pagedeppp = pagedep; 2017 return (1); 2018 } 2019 } 2020 *pagedeppp = NULL; 2021 return (0); 2022 } 2023 /* 2024 * Look up a pagedep. Return 1 if found, 0 otherwise. 2025 * If not found, allocate if DEPALLOC flag is passed. 2026 * Found or allocated entry is returned in pagedeppp. 2027 * This routine must be called with splbio interrupts blocked. 2028 */ 2029 static int 2030 pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp) 2031 struct mount *mp; 2032 struct buf *bp; 2033 ino_t ino; 2034 ufs_lbn_t lbn; 2035 int flags; 2036 struct pagedep **pagedeppp; 2037 { 2038 struct pagedep *pagedep; 2039 struct pagedep_hashhead *pagedephd; 2040 struct worklist *wk; 2041 int ret; 2042 int i; 2043 2044 mtx_assert(&lk, MA_OWNED); 2045 if (bp) { 2046 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 2047 if (wk->wk_type == D_PAGEDEP) { 2048 *pagedeppp = WK_PAGEDEP(wk); 2049 return (1); 2050 } 2051 } 2052 } 2053 pagedephd = PAGEDEP_HASH(mp, ino, lbn); 2054 ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp); 2055 if (ret) { 2056 if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp) 2057 WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list); 2058 return (1); 2059 } 2060 if ((flags & DEPALLOC) == 0) 2061 return (0); 2062 FREE_LOCK(&lk); 2063 pagedep = malloc(sizeof(struct pagedep), 2064 M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO); 2065 workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp); 2066 ACQUIRE_LOCK(&lk); 2067 ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp); 2068 if (*pagedeppp) { 2069 /* 2070 * This should never happen since we only create pagedeps 2071 * with the vnode lock held. Could be an assert. 2072 */ 2073 WORKITEM_FREE(pagedep, D_PAGEDEP); 2074 return (ret); 2075 } 2076 pagedep->pd_ino = ino; 2077 pagedep->pd_lbn = lbn; 2078 LIST_INIT(&pagedep->pd_dirremhd); 2079 LIST_INIT(&pagedep->pd_pendinghd); 2080 for (i = 0; i < DAHASHSZ; i++) 2081 LIST_INIT(&pagedep->pd_diraddhd[i]); 2082 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 2083 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2084 *pagedeppp = pagedep; 2085 return (0); 2086 } 2087 2088 /* 2089 * Structures and routines associated with inodedep caching. 
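 * As with the pagedep cache above, allocating a new entry must drop
 * the softdep lock around malloc(), so inodedep_lookup() re-checks the
 * hash after reacquiring the lock and frees its candidate if another
 * thread installed the entry first.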
2090 */ 2091 LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl; 2092 static u_long inodedep_hash; /* size of hash table - 1 */ 2093 #define INODEDEP_HASH(fs, inum) \ 2094 (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash]) 2095 2096 static int 2097 inodedep_find(inodedephd, fs, inum, inodedeppp) 2098 struct inodedep_hashhead *inodedephd; 2099 struct fs *fs; 2100 ino_t inum; 2101 struct inodedep **inodedeppp; 2102 { 2103 struct inodedep *inodedep; 2104 2105 LIST_FOREACH(inodedep, inodedephd, id_hash) 2106 if (inum == inodedep->id_ino && fs == inodedep->id_fs) 2107 break; 2108 if (inodedep) { 2109 *inodedeppp = inodedep; 2110 return (1); 2111 } 2112 *inodedeppp = NULL; 2113 2114 return (0); 2115 } 2116 /* 2117 * Look up an inodedep. Return 1 if found, 0 if not found. 2118 * If not found, allocate if DEPALLOC flag is passed. 2119 * Found or allocated entry is returned in inodedeppp. 2120 * This routine must be called with splbio interrupts blocked. 2121 */ 2122 static int 2123 inodedep_lookup(mp, inum, flags, inodedeppp) 2124 struct mount *mp; 2125 ino_t inum; 2126 int flags; 2127 struct inodedep **inodedeppp; 2128 { 2129 struct inodedep *inodedep; 2130 struct inodedep_hashhead *inodedephd; 2131 struct fs *fs; 2132 2133 mtx_assert(&lk, MA_OWNED); 2134 fs = VFSTOUFS(mp)->um_fs; 2135 inodedephd = INODEDEP_HASH(fs, inum); 2136 2137 if (inodedep_find(inodedephd, fs, inum, inodedeppp)) 2138 return (1); 2139 if ((flags & DEPALLOC) == 0) 2140 return (0); 2141 /* 2142 * If we are over our limit, try to improve the situation. 2143 */ 2144 if (dep_current[D_INODEDEP] > max_softdeps && (flags & NODELAY) == 0) 2145 request_cleanup(mp, FLUSH_INODES); 2146 FREE_LOCK(&lk); 2147 inodedep = malloc(sizeof(struct inodedep), 2148 M_INODEDEP, M_SOFTDEP_FLAGS); 2149 workitem_alloc(&inodedep->id_list, D_INODEDEP, mp); 2150 ACQUIRE_LOCK(&lk); 2151 if (inodedep_find(inodedephd, fs, inum, inodedeppp)) { 2152 WORKITEM_FREE(inodedep, D_INODEDEP); 2153 return (1); 2154 } 2155 inodedep->id_fs = fs; 2156 inodedep->id_ino = inum; 2157 inodedep->id_state = ALLCOMPLETE; 2158 inodedep->id_nlinkdelta = 0; 2159 inodedep->id_savedino1 = NULL; 2160 inodedep->id_savedsize = -1; 2161 inodedep->id_savedextsize = -1; 2162 inodedep->id_savednlink = -1; 2163 inodedep->id_bmsafemap = NULL; 2164 inodedep->id_mkdiradd = NULL; 2165 LIST_INIT(&inodedep->id_dirremhd); 2166 LIST_INIT(&inodedep->id_pendinghd); 2167 LIST_INIT(&inodedep->id_inowait); 2168 LIST_INIT(&inodedep->id_bufwait); 2169 TAILQ_INIT(&inodedep->id_inoreflst); 2170 TAILQ_INIT(&inodedep->id_inoupdt); 2171 TAILQ_INIT(&inodedep->id_newinoupdt); 2172 TAILQ_INIT(&inodedep->id_extupdt); 2173 TAILQ_INIT(&inodedep->id_newextupdt); 2174 TAILQ_INIT(&inodedep->id_freeblklst); 2175 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 2176 *inodedeppp = inodedep; 2177 return (0); 2178 } 2179 2180 /* 2181 * Structures and routines associated with newblk caching. 
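 * A newblk is allocated as a "union allblk" so the same memory can
 * later be converted in place to an allocdirect or allocindir once the
 * block's use is known; newblk_find() below skips entries that have
 * already been converted when a new dependency is being created.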
2182 */ 2183 LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl; 2184 u_long newblk_hash; /* size of hash table - 1 */ 2185 #define NEWBLK_HASH(fs, inum) \ 2186 (&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash]) 2187 2188 static int 2189 newblk_find(newblkhd, mp, newblkno, flags, newblkpp) 2190 struct newblk_hashhead *newblkhd; 2191 struct mount *mp; 2192 ufs2_daddr_t newblkno; 2193 int flags; 2194 struct newblk **newblkpp; 2195 { 2196 struct newblk *newblk; 2197 2198 LIST_FOREACH(newblk, newblkhd, nb_hash) { 2199 if (newblkno != newblk->nb_newblkno) 2200 continue; 2201 if (mp != newblk->nb_list.wk_mp) 2202 continue; 2203 /* 2204 * If we're creating a new dependency don't match those that 2205 * have already been converted to allocdirects. This is for 2206 * a frag extend. 2207 */ 2208 if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK) 2209 continue; 2210 break; 2211 } 2212 if (newblk) { 2213 *newblkpp = newblk; 2214 return (1); 2215 } 2216 *newblkpp = NULL; 2217 return (0); 2218 } 2219 2220 /* 2221 * Look up a newblk. Return 1 if found, 0 if not found. 2222 * If not found, allocate if DEPALLOC flag is passed. 2223 * Found or allocated entry is returned in newblkpp. 2224 */ 2225 static int 2226 newblk_lookup(mp, newblkno, flags, newblkpp) 2227 struct mount *mp; 2228 ufs2_daddr_t newblkno; 2229 int flags; 2230 struct newblk **newblkpp; 2231 { 2232 struct newblk *newblk; 2233 struct newblk_hashhead *newblkhd; 2234 2235 newblkhd = NEWBLK_HASH(VFSTOUFS(mp)->um_fs, newblkno); 2236 if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp)) 2237 return (1); 2238 if ((flags & DEPALLOC) == 0) 2239 return (0); 2240 FREE_LOCK(&lk); 2241 newblk = malloc(sizeof(union allblk), M_NEWBLK, 2242 M_SOFTDEP_FLAGS | M_ZERO); 2243 workitem_alloc(&newblk->nb_list, D_NEWBLK, mp); 2244 ACQUIRE_LOCK(&lk); 2245 if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp)) { 2246 WORKITEM_FREE(newblk, D_NEWBLK); 2247 return (1); 2248 } 2249 newblk->nb_freefrag = NULL; 2250 LIST_INIT(&newblk->nb_indirdeps); 2251 LIST_INIT(&newblk->nb_newdirblk); 2252 LIST_INIT(&newblk->nb_jwork); 2253 newblk->nb_state = ATTACHED; 2254 newblk->nb_newblkno = newblkno; 2255 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 2256 *newblkpp = newblk; 2257 return (0); 2258 } 2259 2260 /* 2261 * Structures and routines associated with freed indirect block caching. 2262 */ 2263 struct freeworklst *indir_hashtbl; 2264 u_long indir_hash; /* size of hash table - 1 */ 2265 #define INDIR_HASH(mp, blkno) \ 2266 (&indir_hashtbl[((((register_t)(mp)) >> 13) + (blkno)) & indir_hash]) 2267 2268 /* 2269 * Lookup an indirect block in the indir hash table. The freework is 2270 * removed and potentially freed. The caller must do a blocking journal 2271 * write before writing to the blkno. 2272 */ 2273 static int 2274 indirblk_lookup(mp, blkno) 2275 struct mount *mp; 2276 ufs2_daddr_t blkno; 2277 { 2278 struct freework *freework; 2279 struct freeworklst *wkhd; 2280 2281 wkhd = INDIR_HASH(mp, blkno); 2282 TAILQ_FOREACH(freework, wkhd, fw_next) { 2283 if (freework->fw_blkno != blkno) 2284 continue; 2285 if (freework->fw_list.wk_mp != mp) 2286 continue; 2287 indirblk_remove(freework); 2288 return (1); 2289 } 2290 return (0); 2291 } 2292 2293 /* 2294 * Insert an indirect block represented by freework into the indirblk 2295 * hash table so that it may prevent the block from being re-used prior 2296 * to the journal being written. 
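 * An entry stays hashed only while the journal segment recording the
 * free is outstanding; indirblk_remove() marks the freework
 * DEPCOMPLETE again once it leaves the table.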
2297 */ 2298 static void 2299 indirblk_insert(freework) 2300 struct freework *freework; 2301 { 2302 struct jblocks *jblocks; 2303 struct jseg *jseg; 2304 2305 jblocks = VFSTOUFS(freework->fw_list.wk_mp)->softdep_jblocks; 2306 jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst); 2307 if (jseg == NULL) 2308 return; 2309 2310 LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs); 2311 TAILQ_INSERT_HEAD(INDIR_HASH(freework->fw_list.wk_mp, 2312 freework->fw_blkno), freework, fw_next); 2313 freework->fw_state &= ~DEPCOMPLETE; 2314 } 2315 2316 static void 2317 indirblk_remove(freework) 2318 struct freework *freework; 2319 { 2320 2321 LIST_REMOVE(freework, fw_segs); 2322 TAILQ_REMOVE(INDIR_HASH(freework->fw_list.wk_mp, 2323 freework->fw_blkno), freework, fw_next); 2324 freework->fw_state |= DEPCOMPLETE; 2325 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 2326 WORKITEM_FREE(freework, D_FREEWORK); 2327 } 2328 2329 /* 2330 * Executed during filesystem system initialization before 2331 * mounting any filesystems. 2332 */ 2333 void 2334 softdep_initialize() 2335 { 2336 int i; 2337 2338 LIST_INIT(&mkdirlisthd); 2339 max_softdeps = desiredvnodes * 4; 2340 pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, &pagedep_hash); 2341 inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash); 2342 newblk_hashtbl = hashinit(desiredvnodes / 5, M_NEWBLK, &newblk_hash); 2343 bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, &bmsafemap_hash); 2344 i = 1 << (ffs(desiredvnodes / 10) - 1); 2345 indir_hashtbl = malloc(i * sizeof(indir_hashtbl[0]), M_FREEWORK, 2346 M_WAITOK); 2347 indir_hash = i - 1; 2348 for (i = 0; i <= indir_hash; i++) 2349 TAILQ_INIT(&indir_hashtbl[i]); 2350 2351 /* initialise bioops hack */ 2352 bioops.io_start = softdep_disk_io_initiation; 2353 bioops.io_complete = softdep_disk_write_complete; 2354 bioops.io_deallocate = softdep_deallocate_dependencies; 2355 bioops.io_countdeps = softdep_count_dependencies; 2356 2357 /* Initialize the callout with an mtx. */ 2358 callout_init_mtx(&softdep_callout, &lk, 0); 2359 } 2360 2361 /* 2362 * Executed after all filesystems have been unmounted during 2363 * filesystem module unload. 2364 */ 2365 void 2366 softdep_uninitialize() 2367 { 2368 2369 callout_drain(&softdep_callout); 2370 hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash); 2371 hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash); 2372 hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash); 2373 hashdestroy(bmsafemap_hashtbl, M_BMSAFEMAP, bmsafemap_hash); 2374 free(indir_hashtbl, M_FREEWORK); 2375 } 2376 2377 /* 2378 * Called at mount time to notify the dependency code that a 2379 * filesystem wishes to use it. 
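 * softdep_mount() returns 0 on success.  On FS_SUJ filesystems the
 * journal is located and verified via journal_mount() before any
 * superblock summary work is attempted.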
2380 */ 2381 int 2382 softdep_mount(devvp, mp, fs, cred) 2383 struct vnode *devvp; 2384 struct mount *mp; 2385 struct fs *fs; 2386 struct ucred *cred; 2387 { 2388 struct csum_total cstotal; 2389 struct ufsmount *ump; 2390 struct cg *cgp; 2391 struct buf *bp; 2392 int error, cyl; 2393 2394 MNT_ILOCK(mp); 2395 mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP; 2396 if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) { 2397 mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) | 2398 MNTK_SOFTDEP | MNTK_NOASYNC; 2399 } 2400 MNT_IUNLOCK(mp); 2401 ump = VFSTOUFS(mp); 2402 LIST_INIT(&ump->softdep_workitem_pending); 2403 LIST_INIT(&ump->softdep_journal_pending); 2404 TAILQ_INIT(&ump->softdep_unlinked); 2405 LIST_INIT(&ump->softdep_dirtycg); 2406 ump->softdep_worklist_tail = NULL; 2407 ump->softdep_on_worklist = 0; 2408 ump->softdep_deps = 0; 2409 if ((fs->fs_flags & FS_SUJ) && 2410 (error = journal_mount(mp, fs, cred)) != 0) { 2411 printf("Failed to start journal: %d\n", error); 2412 return (error); 2413 } 2414 /* 2415 * When doing soft updates, the counters in the 2416 * superblock may have gotten out of sync. Recomputation 2417 * can take a long time and can be deferred for background 2418 * fsck. However, the old behavior of scanning the cylinder 2419 * groups and recalculating them at mount time is available 2420 * by setting vfs.ffs.compute_summary_at_mount to one. 2421 */ 2422 if (compute_summary_at_mount == 0 || fs->fs_clean != 0) 2423 return (0); 2424 bzero(&cstotal, sizeof cstotal); 2425 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 2426 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), 2427 fs->fs_cgsize, cred, &bp)) != 0) { 2428 brelse(bp); 2429 return (error); 2430 } 2431 cgp = (struct cg *)bp->b_data; 2432 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 2433 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 2434 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 2435 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 2436 fs->fs_cs(fs, cyl) = cgp->cg_cs; 2437 brelse(bp); 2438 } 2439 #ifdef DEBUG 2440 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 2441 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); 2442 #endif 2443 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 2444 return (0); 2445 } 2446 2447 void 2448 softdep_unmount(mp) 2449 struct mount *mp; 2450 { 2451 2452 MNT_ILOCK(mp); 2453 mp->mnt_flag &= ~MNT_SOFTDEP; 2454 if (MOUNTEDSUJ(mp) == 0) { 2455 MNT_IUNLOCK(mp); 2456 return; 2457 } 2458 mp->mnt_flag &= ~MNT_SUJ; 2459 MNT_IUNLOCK(mp); 2460 journal_unmount(mp); 2461 } 2462 2463 static struct jblocks * 2464 jblocks_create(void) 2465 { 2466 struct jblocks *jblocks; 2467 2468 jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO); 2469 TAILQ_INIT(&jblocks->jb_segs); 2470 jblocks->jb_avail = 10; 2471 jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2472 M_JBLOCKS, M_WAITOK | M_ZERO); 2473 2474 return (jblocks); 2475 } 2476 2477 static ufs2_daddr_t 2478 jblocks_alloc(jblocks, bytes, actual) 2479 struct jblocks *jblocks; 2480 int bytes; 2481 int *actual; 2482 { 2483 ufs2_daddr_t daddr; 2484 struct jextent *jext; 2485 int freecnt; 2486 int blocks; 2487 2488 blocks = bytes / DEV_BSIZE; 2489 jext = &jblocks->jb_extent[jblocks->jb_head]; 2490 freecnt = jext->je_blocks - jblocks->jb_off; 2491 if (freecnt == 0) { 2492 jblocks->jb_off = 0; 2493 if (++jblocks->jb_head > jblocks->jb_used) 2494 jblocks->jb_head = 0; 2495 jext = &jblocks->jb_extent[jblocks->jb_head]; 2496 freecnt = jext->je_blocks; 2497 } 2498 if (freecnt > blocks) 2499 freecnt = blocks; 2500 *actual = freecnt 
* DEV_BSIZE; 2501 daddr = jext->je_daddr + jblocks->jb_off; 2502 jblocks->jb_off += freecnt; 2503 jblocks->jb_free -= freecnt; 2504 2505 return (daddr); 2506 } 2507 2508 static void 2509 jblocks_free(jblocks, mp, bytes) 2510 struct jblocks *jblocks; 2511 struct mount *mp; 2512 int bytes; 2513 { 2514 2515 jblocks->jb_free += bytes / DEV_BSIZE; 2516 if (jblocks->jb_suspended) 2517 worklist_speedup(); 2518 wakeup(jblocks); 2519 } 2520 2521 static void 2522 jblocks_destroy(jblocks) 2523 struct jblocks *jblocks; 2524 { 2525 2526 if (jblocks->jb_extent) 2527 free(jblocks->jb_extent, M_JBLOCKS); 2528 free(jblocks, M_JBLOCKS); 2529 } 2530 2531 static void 2532 jblocks_add(jblocks, daddr, blocks) 2533 struct jblocks *jblocks; 2534 ufs2_daddr_t daddr; 2535 int blocks; 2536 { 2537 struct jextent *jext; 2538 2539 jblocks->jb_blocks += blocks; 2540 jblocks->jb_free += blocks; 2541 jext = &jblocks->jb_extent[jblocks->jb_used]; 2542 /* Adding the first block. */ 2543 if (jext->je_daddr == 0) { 2544 jext->je_daddr = daddr; 2545 jext->je_blocks = blocks; 2546 return; 2547 } 2548 /* Extending the last extent. */ 2549 if (jext->je_daddr + jext->je_blocks == daddr) { 2550 jext->je_blocks += blocks; 2551 return; 2552 } 2553 /* Adding a new extent. */ 2554 if (++jblocks->jb_used == jblocks->jb_avail) { 2555 jblocks->jb_avail *= 2; 2556 jext = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2557 M_JBLOCKS, M_WAITOK | M_ZERO); 2558 memcpy(jext, jblocks->jb_extent, 2559 sizeof(struct jextent) * jblocks->jb_used); 2560 free(jblocks->jb_extent, M_JBLOCKS); 2561 jblocks->jb_extent = jext; 2562 } 2563 jext = &jblocks->jb_extent[jblocks->jb_used]; 2564 jext->je_daddr = daddr; 2565 jext->je_blocks = blocks; 2566 return; 2567 } 2568 2569 int 2570 softdep_journal_lookup(mp, vpp) 2571 struct mount *mp; 2572 struct vnode **vpp; 2573 { 2574 struct componentname cnp; 2575 struct vnode *dvp; 2576 ino_t sujournal; 2577 int error; 2578 2579 error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp); 2580 if (error) 2581 return (error); 2582 bzero(&cnp, sizeof(cnp)); 2583 cnp.cn_nameiop = LOOKUP; 2584 cnp.cn_flags = ISLASTCN; 2585 cnp.cn_thread = curthread; 2586 cnp.cn_cred = curthread->td_ucred; 2587 cnp.cn_pnbuf = SUJ_FILE; 2588 cnp.cn_nameptr = SUJ_FILE; 2589 cnp.cn_namelen = strlen(SUJ_FILE); 2590 error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal); 2591 vput(dvp); 2592 if (error != 0) 2593 return (error); 2594 error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp); 2595 return (error); 2596 } 2597 2598 /* 2599 * Open and verify the journal file. 2600 */ 2601 static int 2602 journal_mount(mp, fs, cred) 2603 struct mount *mp; 2604 struct fs *fs; 2605 struct ucred *cred; 2606 { 2607 struct jblocks *jblocks; 2608 struct vnode *vp; 2609 struct inode *ip; 2610 ufs2_daddr_t blkno; 2611 int bcount; 2612 int error; 2613 int i; 2614 2615 error = softdep_journal_lookup(mp, &vp); 2616 if (error != 0) { 2617 printf("Failed to find journal. Use tunefs to create one\n"); 2618 return (error); 2619 } 2620 ip = VTOI(vp); 2621 if (ip->i_size < SUJ_MIN) { 2622 error = ENOSPC; 2623 goto out; 2624 } 2625 bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */ 2626 jblocks = jblocks_create(); 2627 for (i = 0; i < bcount; i++) { 2628 error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL); 2629 if (error) 2630 break; 2631 jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag)); 2632 } 2633 if (error) { 2634 jblocks_destroy(jblocks); 2635 goto out; 2636 } 2637 jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. 
 */
	jblocks->jb_min = jblocks->jb_free / 10;	/* Suspend at 10%. */
	VFSTOUFS(mp)->softdep_jblocks = jblocks;
out:
	if (error == 0) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_SUJ;
		mp->mnt_flag &= ~MNT_SOFTDEP;
		MNT_IUNLOCK(mp);
		/*
		 * Only validate the journal contents if the
		 * filesystem is clean, otherwise we write the logs
		 * but they'll never be used.  If the filesystem was
		 * still dirty when we mounted it the journal is
		 * invalid and a new journal can only be valid if it
		 * starts from a clean mount.
		 */
		if (fs->fs_clean) {
			DIP_SET(ip, i_modrev, fs->fs_mtime);
			ip->i_flags |= IN_MODIFIED;
			ffs_update(vp, 1);
		}
	}
	vput(vp);
	return (error);
}

static void
journal_unmount(mp)
	struct mount *mp;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	if (ump->softdep_jblocks)
		jblocks_destroy(ump->softdep_jblocks);
	ump->softdep_jblocks = NULL;
}

/*
 * Called when a journal record is ready to be written.  Space is allocated
 * and the journal entry is created when the journal is flushed to stable
 * store.
 */
static void
add_to_journal(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	mtx_assert(&lk, MA_OWNED);
	ump = VFSTOUFS(wk->wk_mp);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_journal: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
		ump->softdep_jblocks->jb_age = ticks;
		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
	} else
		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
	ump->softdep_journal_tail = wk;
	ump->softdep_on_journal += 1;
}

/*
 * Remove an arbitrary item from the journal worklist, maintaining the tail
 * pointer.  This happens when a new operation obviates the need to
 * journal an old operation.
 */
static void
remove_from_journal(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	mtx_assert(&lk, MA_OWNED);
	ump = VFSTOUFS(wk->wk_mp);
#ifdef SUJ_DEBUG
	{
		struct worklist *wkn;

		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
			if (wkn == wk)
				break;
		if (wkn == NULL)
			panic("remove_from_journal: %p is not in journal", wk);
	}
#endif
	/*
	 * We emulate a TAILQ to save space in most structures which do not
	 * require TAILQ semantics.  Here we must update the tail pointer
	 * when we remove the item currently at the tail.  This works
	 * only if the worklist linkage is at the beginning of the structure.
	 */
	if (ump->softdep_journal_tail == wk)
		ump->softdep_journal_tail =
		    (struct worklist *)wk->wk_list.le_prev;

	WORKLIST_REMOVE(wk);
	ump->softdep_on_journal -= 1;
}

/*
 * Check for journal space as well as dependency limits so the prelink
 * code can throttle both journaled and non-journaled filesystems.
 * Threshold is 0 for low and 1 for min.
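 * As a rough worked example of the check below: avail charges the
 * journal (softdep_on_journal * JREC_SIZE) / DEV_BSIZE blocks for
 * records not yet written.  The softdep_prealloc() comment further
 * down quotes 6553 records in 20% of a 1MB journal, which implies a
 * 32-byte record size (1MB / 5 / 32 == 6553).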
2744 */ 2745 static int 2746 journal_space(ump, thresh) 2747 struct ufsmount *ump; 2748 int thresh; 2749 { 2750 struct jblocks *jblocks; 2751 int avail; 2752 2753 jblocks = ump->softdep_jblocks; 2754 if (jblocks == NULL) 2755 return (1); 2756 /* 2757 * We use a tighter restriction here to prevent request_cleanup() 2758 * running in threads from running into locks we currently hold. 2759 */ 2760 if (dep_current[D_INODEDEP] > (max_softdeps / 10) * 9) 2761 return (0); 2762 if (thresh) 2763 thresh = jblocks->jb_min; 2764 else 2765 thresh = jblocks->jb_low; 2766 avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE; 2767 avail = jblocks->jb_free - avail; 2768 2769 return (avail > thresh); 2770 } 2771 2772 static void 2773 journal_suspend(ump) 2774 struct ufsmount *ump; 2775 { 2776 struct jblocks *jblocks; 2777 struct mount *mp; 2778 2779 mp = UFSTOVFS(ump); 2780 jblocks = ump->softdep_jblocks; 2781 MNT_ILOCK(mp); 2782 if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 2783 stat_journal_min++; 2784 mp->mnt_kern_flag |= MNTK_SUSPEND; 2785 mp->mnt_susp_owner = FIRST_THREAD_IN_PROC(softdepproc); 2786 } 2787 jblocks->jb_suspended = 1; 2788 MNT_IUNLOCK(mp); 2789 } 2790 2791 static int 2792 journal_unsuspend(struct ufsmount *ump) 2793 { 2794 struct jblocks *jblocks; 2795 struct mount *mp; 2796 2797 mp = UFSTOVFS(ump); 2798 jblocks = ump->softdep_jblocks; 2799 2800 if (jblocks != NULL && jblocks->jb_suspended && 2801 journal_space(ump, jblocks->jb_min)) { 2802 jblocks->jb_suspended = 0; 2803 FREE_LOCK(&lk); 2804 mp->mnt_susp_owner = curthread; 2805 vfs_write_resume(mp); 2806 ACQUIRE_LOCK(&lk); 2807 return (1); 2808 } 2809 return (0); 2810 } 2811 2812 /* 2813 * Called before any allocation function to be certain that there is 2814 * sufficient space in the journal prior to creating any new records. 2815 * Since in the case of block allocation we may have multiple locked 2816 * buffers at the time of the actual allocation we can not block 2817 * when the journal records are created. Doing so would create a deadlock 2818 * if any of these buffers needed to be flushed to reclaim space. Instead 2819 * we require a sufficiently large amount of available space such that 2820 * each thread in the system could have passed this allocation check and 2821 * still have sufficient free space. With 20% of a minimum journal size 2822 * of 1MB we have 6553 records available. 2823 */ 2824 int 2825 softdep_prealloc(vp, waitok) 2826 struct vnode *vp; 2827 int waitok; 2828 { 2829 struct ufsmount *ump; 2830 2831 /* 2832 * Nothing to do if we are not running journaled soft updates. 2833 * If we currently hold the snapshot lock, we must avoid handling 2834 * other resources that could cause deadlock. 2835 */ 2836 if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp))) 2837 return (0); 2838 ump = VFSTOUFS(vp->v_mount); 2839 ACQUIRE_LOCK(&lk); 2840 if (journal_space(ump, 0)) { 2841 FREE_LOCK(&lk); 2842 return (0); 2843 } 2844 stat_journal_low++; 2845 FREE_LOCK(&lk); 2846 if (waitok == MNT_NOWAIT) 2847 return (ENOSPC); 2848 /* 2849 * Attempt to sync this vnode once to flush any journal 2850 * work attached to it. 
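	 * The sync is skipped below while a copy-on-write is in
	 * progress since, as in process_worklist_item(), writing could
	 * recurse into the copy-on-write routine.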
2851 */ 2852 if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0) 2853 ffs_syncvnode(vp, waitok, 0); 2854 ACQUIRE_LOCK(&lk); 2855 process_removes(vp); 2856 process_truncates(vp); 2857 if (journal_space(ump, 0) == 0) { 2858 softdep_speedup(); 2859 if (journal_space(ump, 1) == 0) 2860 journal_suspend(ump); 2861 } 2862 FREE_LOCK(&lk); 2863 2864 return (0); 2865 } 2866 2867 /* 2868 * Before adjusting a link count on a vnode verify that we have sufficient 2869 * journal space. If not, process operations that depend on the currently 2870 * locked pair of vnodes to try to flush space as the syncer, buf daemon, 2871 * and softdep flush threads can not acquire these locks to reclaim space. 2872 */ 2873 static void 2874 softdep_prelink(dvp, vp) 2875 struct vnode *dvp; 2876 struct vnode *vp; 2877 { 2878 struct ufsmount *ump; 2879 2880 ump = VFSTOUFS(dvp->v_mount); 2881 mtx_assert(&lk, MA_OWNED); 2882 /* 2883 * Nothing to do if we have sufficient journal space. 2884 * If we currently hold the snapshot lock, we must avoid 2885 * handling other resources that could cause deadlock. 2886 */ 2887 if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp)))) 2888 return; 2889 stat_journal_low++; 2890 FREE_LOCK(&lk); 2891 if (vp) 2892 ffs_syncvnode(vp, MNT_NOWAIT, 0); 2893 ffs_syncvnode(dvp, MNT_WAIT, 0); 2894 ACQUIRE_LOCK(&lk); 2895 /* Process vp before dvp as it may create .. removes. */ 2896 if (vp) { 2897 process_removes(vp); 2898 process_truncates(vp); 2899 } 2900 process_removes(dvp); 2901 process_truncates(dvp); 2902 softdep_speedup(); 2903 process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT); 2904 if (journal_space(ump, 0) == 0) { 2905 softdep_speedup(); 2906 if (journal_space(ump, 1) == 0) 2907 journal_suspend(ump); 2908 } 2909 } 2910 2911 static void 2912 jseg_write(ump, jseg, data) 2913 struct ufsmount *ump; 2914 struct jseg *jseg; 2915 uint8_t *data; 2916 { 2917 struct jsegrec *rec; 2918 2919 rec = (struct jsegrec *)data; 2920 rec->jsr_seq = jseg->js_seq; 2921 rec->jsr_oldest = jseg->js_oldseq; 2922 rec->jsr_cnt = jseg->js_cnt; 2923 rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize; 2924 rec->jsr_crc = 0; 2925 rec->jsr_time = ump->um_fs->fs_mtime; 2926 } 2927 2928 static inline void 2929 inoref_write(inoref, jseg, rec) 2930 struct inoref *inoref; 2931 struct jseg *jseg; 2932 struct jrefrec *rec; 2933 { 2934 2935 inoref->if_jsegdep->jd_seg = jseg; 2936 rec->jr_ino = inoref->if_ino; 2937 rec->jr_parent = inoref->if_parent; 2938 rec->jr_nlink = inoref->if_nlink; 2939 rec->jr_mode = inoref->if_mode; 2940 rec->jr_diroff = inoref->if_diroff; 2941 } 2942 2943 static void 2944 jaddref_write(jaddref, jseg, data) 2945 struct jaddref *jaddref; 2946 struct jseg *jseg; 2947 uint8_t *data; 2948 { 2949 struct jrefrec *rec; 2950 2951 rec = (struct jrefrec *)data; 2952 rec->jr_op = JOP_ADDREF; 2953 inoref_write(&jaddref->ja_ref, jseg, rec); 2954 } 2955 2956 static void 2957 jremref_write(jremref, jseg, data) 2958 struct jremref *jremref; 2959 struct jseg *jseg; 2960 uint8_t *data; 2961 { 2962 struct jrefrec *rec; 2963 2964 rec = (struct jrefrec *)data; 2965 rec->jr_op = JOP_REMREF; 2966 inoref_write(&jremref->jr_ref, jseg, rec); 2967 } 2968 2969 static void 2970 jmvref_write(jmvref, jseg, data) 2971 struct jmvref *jmvref; 2972 struct jseg *jseg; 2973 uint8_t *data; 2974 { 2975 struct jmvrec *rec; 2976 2977 rec = (struct jmvrec *)data; 2978 rec->jm_op = JOP_MVREF; 2979 rec->jm_ino = jmvref->jm_ino; 2980 rec->jm_parent = jmvref->jm_parent; 2981 rec->jm_oldoff = jmvref->jm_oldoff; 2982 rec->jm_newoff = 
jmvref->jm_newoff; 2983 } 2984 2985 static void 2986 jnewblk_write(jnewblk, jseg, data) 2987 struct jnewblk *jnewblk; 2988 struct jseg *jseg; 2989 uint8_t *data; 2990 { 2991 struct jblkrec *rec; 2992 2993 jnewblk->jn_jsegdep->jd_seg = jseg; 2994 rec = (struct jblkrec *)data; 2995 rec->jb_op = JOP_NEWBLK; 2996 rec->jb_ino = jnewblk->jn_ino; 2997 rec->jb_blkno = jnewblk->jn_blkno; 2998 rec->jb_lbn = jnewblk->jn_lbn; 2999 rec->jb_frags = jnewblk->jn_frags; 3000 rec->jb_oldfrags = jnewblk->jn_oldfrags; 3001 } 3002 3003 static void 3004 jfreeblk_write(jfreeblk, jseg, data) 3005 struct jfreeblk *jfreeblk; 3006 struct jseg *jseg; 3007 uint8_t *data; 3008 { 3009 struct jblkrec *rec; 3010 3011 jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg; 3012 rec = (struct jblkrec *)data; 3013 rec->jb_op = JOP_FREEBLK; 3014 rec->jb_ino = jfreeblk->jf_ino; 3015 rec->jb_blkno = jfreeblk->jf_blkno; 3016 rec->jb_lbn = jfreeblk->jf_lbn; 3017 rec->jb_frags = jfreeblk->jf_frags; 3018 rec->jb_oldfrags = 0; 3019 } 3020 3021 static void 3022 jfreefrag_write(jfreefrag, jseg, data) 3023 struct jfreefrag *jfreefrag; 3024 struct jseg *jseg; 3025 uint8_t *data; 3026 { 3027 struct jblkrec *rec; 3028 3029 jfreefrag->fr_jsegdep->jd_seg = jseg; 3030 rec = (struct jblkrec *)data; 3031 rec->jb_op = JOP_FREEBLK; 3032 rec->jb_ino = jfreefrag->fr_ino; 3033 rec->jb_blkno = jfreefrag->fr_blkno; 3034 rec->jb_lbn = jfreefrag->fr_lbn; 3035 rec->jb_frags = jfreefrag->fr_frags; 3036 rec->jb_oldfrags = 0; 3037 } 3038 3039 static void 3040 jtrunc_write(jtrunc, jseg, data) 3041 struct jtrunc *jtrunc; 3042 struct jseg *jseg; 3043 uint8_t *data; 3044 { 3045 struct jtrncrec *rec; 3046 3047 jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg; 3048 rec = (struct jtrncrec *)data; 3049 rec->jt_op = JOP_TRUNC; 3050 rec->jt_ino = jtrunc->jt_ino; 3051 rec->jt_size = jtrunc->jt_size; 3052 rec->jt_extsize = jtrunc->jt_extsize; 3053 } 3054 3055 static void 3056 jfsync_write(jfsync, jseg, data) 3057 struct jfsync *jfsync; 3058 struct jseg *jseg; 3059 uint8_t *data; 3060 { 3061 struct jtrncrec *rec; 3062 3063 rec = (struct jtrncrec *)data; 3064 rec->jt_op = JOP_SYNC; 3065 rec->jt_ino = jfsync->jfs_ino; 3066 rec->jt_size = jfsync->jfs_size; 3067 rec->jt_extsize = jfsync->jfs_extsize; 3068 } 3069 3070 static void 3071 softdep_flushjournal(mp) 3072 struct mount *mp; 3073 { 3074 struct jblocks *jblocks; 3075 struct ufsmount *ump; 3076 3077 if (MOUNTEDSUJ(mp) == 0) 3078 return; 3079 ump = VFSTOUFS(mp); 3080 jblocks = ump->softdep_jblocks; 3081 ACQUIRE_LOCK(&lk); 3082 while (ump->softdep_on_journal) { 3083 jblocks->jb_needseg = 1; 3084 softdep_process_journal(mp, NULL, MNT_WAIT); 3085 } 3086 FREE_LOCK(&lk); 3087 } 3088 3089 static void softdep_synchronize_completed(struct bio *); 3090 static void softdep_synchronize(struct bio *, struct ufsmount *, void *); 3091 3092 static void 3093 softdep_synchronize_completed(bp) 3094 struct bio *bp; 3095 { 3096 struct jseg *oldest; 3097 struct jseg *jseg; 3098 3099 /* 3100 * caller1 marks the last segment written before we issued the 3101 * synchronize cache. 3102 */ 3103 jseg = bp->bio_caller1; 3104 oldest = NULL; 3105 ACQUIRE_LOCK(&lk); 3106 /* 3107 * Mark all the journal entries waiting on the synchronize cache 3108 * as completed so they may continue on. 3109 */ 3110 while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) { 3111 jseg->js_state |= COMPLETE; 3112 oldest = jseg; 3113 jseg = TAILQ_PREV(jseg, jseglst, js_next); 3114 } 3115 /* 3116 * Restart deferred journal entry processing from the oldest 3117 * completed jseg. 
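	 * complete_jsegs() only advances from jb_writeseg, so handing
	 * it the oldest newly-completed segment preserves the in-order
	 * completion requirement.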
	 */
	if (oldest)
		complete_jsegs(oldest);

	FREE_LOCK(&lk);
	g_destroy_bio(bp);
}

/*
 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
 * barriers.  The journal must be written prior to any blocks that depend
 * on it and the journal can not be released until the blocks have been
 * written.  This code handles both barriers simultaneously.
 */
static void
softdep_synchronize(bp, ump, caller1)
	struct bio *bp;
	struct ufsmount *ump;
	void *caller1;
{

	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_data = NULL;
	bp->bio_offset = ump->um_cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_done = softdep_synchronize_completed;
	bp->bio_caller1 = caller1;
	g_io_request(bp,
	    (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
}

/*
 * Flush some journal records to disk.
 */
static void
softdep_process_journal(mp, needwk, flags)
	struct mount *mp;
	struct worklist *needwk;
	int flags;
{
	struct jblocks *jblocks;
	struct ufsmount *ump;
	struct worklist *wk;
	struct jseg *jseg;
	struct buf *bp;
	struct bio *bio;
	uint8_t *data;
	struct fs *fs;
	int shouldflush;
	int segwritten;
	int jrecmin;	/* Minimum records per block. */
	int jrecmax;	/* Maximum records per block. */
	int size;
	int cnt;
	int off;
	int devbsize;

	if (MOUNTEDSUJ(mp) == 0)
		return;
	shouldflush = softdep_flushcache;
	bio = NULL;
	jseg = NULL;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	jblocks = ump->softdep_jblocks;
	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
	/*
	 * We write anywhere between a disk block and an fs block.  The
	 * upper bound is picked to prevent buffer cache fragmentation and
	 * limit processing time per I/O.
	 */
	jrecmin = (devbsize / JREC_SIZE) - 1;	/* -1 for seg header */
	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
	segwritten = 0;
	for (;;) {
		cnt = ump->softdep_on_journal;
		/*
		 * Criteria for writing a segment:
		 * 1) We have a full block.
		 * 2) We're called from jwait() and haven't found the
		 *    journal item yet.
		 * 3) Always write if needseg is set.
		 * 4) If we are called from process_worklist and have
		 *    not yet written anything we write a partial block
		 *    to enforce a 1 second maximum latency on journal
		 *    entries.
		 */
		if (cnt < (jrecmax - 1) && needwk == NULL &&
		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
			break;
		cnt++;
		/*
		 * Verify some free journal space.  softdep_prealloc() should
		 * guarantee that we don't run out so this is indicative of
		 * a problem with the flow control.  Try to recover
		 * gracefully in any event.
3215 */ 3216 while (jblocks->jb_free == 0) { 3217 if (flags != MNT_WAIT) 3218 break; 3219 printf("softdep: Out of journal space!\n"); 3220 softdep_speedup(); 3221 msleep(jblocks, &lk, PRIBIO, "jblocks", hz); 3222 } 3223 FREE_LOCK(&lk); 3224 jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS); 3225 workitem_alloc(&jseg->js_list, D_JSEG, mp); 3226 LIST_INIT(&jseg->js_entries); 3227 LIST_INIT(&jseg->js_indirs); 3228 jseg->js_state = ATTACHED; 3229 if (shouldflush == 0) 3230 jseg->js_state |= COMPLETE; 3231 else if (bio == NULL) 3232 bio = g_alloc_bio(); 3233 jseg->js_jblocks = jblocks; 3234 bp = geteblk(fs->fs_bsize, 0); 3235 ACQUIRE_LOCK(&lk); 3236 /* 3237 * If there was a race while we were allocating the block 3238 * and jseg the entry we care about was likely written. 3239 * We bail out in both the WAIT and NOWAIT case and assume 3240 * the caller will loop if the entry it cares about is 3241 * not written. 3242 */ 3243 cnt = ump->softdep_on_journal; 3244 if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) { 3245 bp->b_flags |= B_INVAL | B_NOCACHE; 3246 WORKITEM_FREE(jseg, D_JSEG); 3247 FREE_LOCK(&lk); 3248 brelse(bp); 3249 ACQUIRE_LOCK(&lk); 3250 break; 3251 } 3252 /* 3253 * Calculate the disk block size required for the available 3254 * records rounded to the min size. 3255 */ 3256 if (cnt == 0) 3257 size = devbsize; 3258 else if (cnt < jrecmax) 3259 size = howmany(cnt, jrecmin) * devbsize; 3260 else 3261 size = fs->fs_bsize; 3262 /* 3263 * Allocate a disk block for this journal data and account 3264 * for truncation of the requested size if enough contiguous 3265 * space was not available. 3266 */ 3267 bp->b_blkno = jblocks_alloc(jblocks, size, &size); 3268 bp->b_lblkno = bp->b_blkno; 3269 bp->b_offset = bp->b_blkno * DEV_BSIZE; 3270 bp->b_bcount = size; 3271 bp->b_bufobj = &ump->um_devvp->v_bufobj; 3272 bp->b_flags &= ~B_INVAL; 3273 bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY; 3274 /* 3275 * Initialize our jseg with cnt records. Assign the next 3276 * sequence number to it and link it in-order. 3277 */ 3278 cnt = MIN(cnt, (size / devbsize) * jrecmin); 3279 jseg->js_buf = bp; 3280 jseg->js_cnt = cnt; 3281 jseg->js_refs = cnt + 1; /* Self ref. */ 3282 jseg->js_size = size; 3283 jseg->js_seq = jblocks->jb_nextseq++; 3284 if (jblocks->jb_oldestseg == NULL) 3285 jblocks->jb_oldestseg = jseg; 3286 jseg->js_oldseq = jblocks->jb_oldestseg->js_seq; 3287 TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next); 3288 if (jblocks->jb_writeseg == NULL) 3289 jblocks->jb_writeseg = jseg; 3290 /* 3291 * Start filling in records from the pending list. 3292 */ 3293 data = bp->b_data; 3294 off = 0; 3295 while ((wk = LIST_FIRST(&ump->softdep_journal_pending)) 3296 != NULL) { 3297 if (cnt == 0) 3298 break; 3299 /* Place a segment header on every device block. 
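			 * The header consumes one record slot, which is
			 * why jrecmin above was computed as one less than
			 * the records that fit in a device block.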
*/ 3300 if ((off % devbsize) == 0) { 3301 jseg_write(ump, jseg, data); 3302 off += JREC_SIZE; 3303 data = bp->b_data + off; 3304 } 3305 if (wk == needwk) 3306 needwk = NULL; 3307 remove_from_journal(wk); 3308 wk->wk_state |= INPROGRESS; 3309 WORKLIST_INSERT(&jseg->js_entries, wk); 3310 switch (wk->wk_type) { 3311 case D_JADDREF: 3312 jaddref_write(WK_JADDREF(wk), jseg, data); 3313 break; 3314 case D_JREMREF: 3315 jremref_write(WK_JREMREF(wk), jseg, data); 3316 break; 3317 case D_JMVREF: 3318 jmvref_write(WK_JMVREF(wk), jseg, data); 3319 break; 3320 case D_JNEWBLK: 3321 jnewblk_write(WK_JNEWBLK(wk), jseg, data); 3322 break; 3323 case D_JFREEBLK: 3324 jfreeblk_write(WK_JFREEBLK(wk), jseg, data); 3325 break; 3326 case D_JFREEFRAG: 3327 jfreefrag_write(WK_JFREEFRAG(wk), jseg, data); 3328 break; 3329 case D_JTRUNC: 3330 jtrunc_write(WK_JTRUNC(wk), jseg, data); 3331 break; 3332 case D_JFSYNC: 3333 jfsync_write(WK_JFSYNC(wk), jseg, data); 3334 break; 3335 default: 3336 panic("process_journal: Unknown type %s", 3337 TYPENAME(wk->wk_type)); 3338 /* NOTREACHED */ 3339 } 3340 off += JREC_SIZE; 3341 data = bp->b_data + off; 3342 cnt--; 3343 } 3344 /* 3345 * Write this one buffer and continue. 3346 */ 3347 segwritten = 1; 3348 jblocks->jb_needseg = 0; 3349 WORKLIST_INSERT(&bp->b_dep, &jseg->js_list); 3350 FREE_LOCK(&lk); 3351 BO_LOCK(bp->b_bufobj); 3352 bgetvp(ump->um_devvp, bp); 3353 BO_UNLOCK(bp->b_bufobj); 3354 /* 3355 * We only do the blocking wait once we find the journal 3356 * entry we're looking for. 3357 */ 3358 if (needwk == NULL && flags == MNT_WAIT) 3359 bwrite(bp); 3360 else 3361 bawrite(bp); 3362 ACQUIRE_LOCK(&lk); 3363 } 3364 /* 3365 * If we wrote a segment issue a synchronize cache so the journal 3366 * is reflected on disk before the data is written. Since reclaiming 3367 * journal space also requires writing a journal record this 3368 * process also enforces a barrier before reclamation. 3369 */ 3370 if (segwritten && shouldflush) { 3371 softdep_synchronize(bio, ump, 3372 TAILQ_LAST(&jblocks->jb_segs, jseglst)); 3373 } else if (bio) 3374 g_destroy_bio(bio); 3375 /* 3376 * If we've suspended the filesystem because we ran out of journal 3377 * space either try to sync it here to make some progress or 3378 * unsuspend it if we already have. 3379 */ 3380 if (flags == 0 && jblocks->jb_suspended) { 3381 if (journal_unsuspend(ump)) 3382 return; 3383 FREE_LOCK(&lk); 3384 VFS_SYNC(mp, MNT_NOWAIT); 3385 ffs_sbupdate(ump, MNT_WAIT, 0); 3386 ACQUIRE_LOCK(&lk); 3387 } 3388 } 3389 3390 /* 3391 * Complete a jseg, allowing all dependencies awaiting journal writes 3392 * to proceed. Each journal dependency also attaches a jsegdep to dependent 3393 * structures so that the journal segment can be freed to reclaim space. 3394 */ 3395 static void 3396 complete_jseg(jseg) 3397 struct jseg *jseg; 3398 { 3399 struct worklist *wk; 3400 struct jmvref *jmvref; 3401 int waiting; 3402 #ifdef INVARIANTS 3403 int i = 0; 3404 #endif 3405 3406 while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) { 3407 WORKLIST_REMOVE(wk); 3408 waiting = wk->wk_state & IOWAITING; 3409 wk->wk_state &= ~(INPROGRESS | IOWAITING); 3410 wk->wk_state |= COMPLETE; 3411 KASSERT(i++ < jseg->js_cnt, 3412 ("handle_written_jseg: overflow %d >= %d", 3413 i - 1, jseg->js_cnt)); 3414 switch (wk->wk_type) { 3415 case D_JADDREF: 3416 handle_written_jaddref(WK_JADDREF(wk)); 3417 break; 3418 case D_JREMREF: 3419 handle_written_jremref(WK_JREMREF(wk)); 3420 break; 3421 case D_JMVREF: 3422 rele_jseg(jseg); /* No jsegdep. 
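			 * A jmvref never allocates a jsegdep, so the
			 * per-record reference that would otherwise be
			 * handed to one is dropped here instead.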
*/ 3423 jmvref = WK_JMVREF(wk); 3424 LIST_REMOVE(jmvref, jm_deps); 3425 if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0) 3426 free_pagedep(jmvref->jm_pagedep); 3427 WORKITEM_FREE(jmvref, D_JMVREF); 3428 break; 3429 case D_JNEWBLK: 3430 handle_written_jnewblk(WK_JNEWBLK(wk)); 3431 break; 3432 case D_JFREEBLK: 3433 handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep); 3434 break; 3435 case D_JTRUNC: 3436 handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep); 3437 break; 3438 case D_JFSYNC: 3439 rele_jseg(jseg); /* No jsegdep. */ 3440 WORKITEM_FREE(wk, D_JFSYNC); 3441 break; 3442 case D_JFREEFRAG: 3443 handle_written_jfreefrag(WK_JFREEFRAG(wk)); 3444 break; 3445 default: 3446 panic("handle_written_jseg: Unknown type %s", 3447 TYPENAME(wk->wk_type)); 3448 /* NOTREACHED */ 3449 } 3450 if (waiting) 3451 wakeup(wk); 3452 } 3453 /* Release the self reference so the structure may be freed. */ 3454 rele_jseg(jseg); 3455 } 3456 3457 /* 3458 * Determine which jsegs are ready for completion processing. Waits for 3459 * synchronize cache to complete as well as forcing in-order completion 3460 * of journal entries. 3461 */ 3462 static void 3463 complete_jsegs(jseg) 3464 struct jseg *jseg; 3465 { 3466 struct jblocks *jblocks; 3467 struct jseg *jsegn; 3468 3469 jblocks = jseg->js_jblocks; 3470 /* 3471 * Don't allow out of order completions. If this isn't the first 3472 * block wait for it to write before we're done. 3473 */ 3474 if (jseg != jblocks->jb_writeseg) 3475 return; 3476 /* Iterate through available jsegs processing their entries. */ 3477 while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) { 3478 jblocks->jb_oldestwrseq = jseg->js_oldseq; 3479 jsegn = TAILQ_NEXT(jseg, js_next); 3480 complete_jseg(jseg); 3481 jseg = jsegn; 3482 } 3483 jblocks->jb_writeseg = jseg; 3484 /* 3485 * Attempt to free jsegs now that oldestwrseq may have advanced. 3486 */ 3487 free_jsegs(jblocks); 3488 } 3489 3490 /* 3491 * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle 3492 * the final completions. 3493 */ 3494 static void 3495 handle_written_jseg(jseg, bp) 3496 struct jseg *jseg; 3497 struct buf *bp; 3498 { 3499 3500 if (jseg->js_refs == 0) 3501 panic("handle_written_jseg: No self-reference on %p", jseg); 3502 jseg->js_state |= DEPCOMPLETE; 3503 /* 3504 * We'll never need this buffer again, set flags so it will be 3505 * discarded. 3506 */ 3507 bp->b_flags |= B_INVAL | B_NOCACHE; 3508 complete_jsegs(jseg); 3509 } 3510 3511 static inline struct jsegdep * 3512 inoref_jseg(inoref) 3513 struct inoref *inoref; 3514 { 3515 struct jsegdep *jsegdep; 3516 3517 jsegdep = inoref->if_jsegdep; 3518 inoref->if_jsegdep = NULL; 3519 3520 return (jsegdep); 3521 } 3522 3523 /* 3524 * Called once a jremref has made it to stable store. The jremref is marked 3525 * complete and we attempt to free it. Any pagedeps writes sleeping waiting 3526 * for the jremref to complete will be awoken by free_jremref. 3527 */ 3528 static void 3529 handle_written_jremref(jremref) 3530 struct jremref *jremref; 3531 { 3532 struct inodedep *inodedep; 3533 struct jsegdep *jsegdep; 3534 struct dirrem *dirrem; 3535 3536 /* Grab the jsegdep. */ 3537 jsegdep = inoref_jseg(&jremref->jr_ref); 3538 /* 3539 * Remove us from the inoref list. 3540 */ 3541 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 3542 0, &inodedep) == 0) 3543 panic("handle_written_jremref: Lost inodedep"); 3544 TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); 3545 /* 3546 * Complete the dirrem. 
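	 * Once the last jremref is written and the dirrem is otherwise
	 * complete, it can be queued for the work daemon below.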
3547 */ 3548 dirrem = jremref->jr_dirrem; 3549 jremref->jr_dirrem = NULL; 3550 LIST_REMOVE(jremref, jr_deps); 3551 jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT; 3552 jwork_insert(&dirrem->dm_jwork, jsegdep); 3553 if (LIST_EMPTY(&dirrem->dm_jremrefhd) && 3554 (dirrem->dm_state & COMPLETE) != 0) 3555 add_to_worklist(&dirrem->dm_list, 0); 3556 free_jremref(jremref); 3557 } 3558 3559 /* 3560 * Called once a jaddref has made it to stable store. The dependency is 3561 * marked complete and any dependent structures are added to the inode 3562 * bufwait list to be completed as soon as it is written. If a bitmap write 3563 * depends on this entry we move the inode into the inodedephd of the 3564 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap. 3565 */ 3566 static void 3567 handle_written_jaddref(jaddref) 3568 struct jaddref *jaddref; 3569 { 3570 struct jsegdep *jsegdep; 3571 struct inodedep *inodedep; 3572 struct diradd *diradd; 3573 struct mkdir *mkdir; 3574 3575 /* Grab the jsegdep. */ 3576 jsegdep = inoref_jseg(&jaddref->ja_ref); 3577 mkdir = NULL; 3578 diradd = NULL; 3579 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 3580 0, &inodedep) == 0) 3581 panic("handle_written_jaddref: Lost inodedep."); 3582 if (jaddref->ja_diradd == NULL) 3583 panic("handle_written_jaddref: No dependency"); 3584 if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) { 3585 diradd = jaddref->ja_diradd; 3586 WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list); 3587 } else if (jaddref->ja_state & MKDIR_PARENT) { 3588 mkdir = jaddref->ja_mkdir; 3589 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list); 3590 } else if (jaddref->ja_state & MKDIR_BODY) 3591 mkdir = jaddref->ja_mkdir; 3592 else 3593 panic("handle_written_jaddref: Unknown dependency %p", 3594 jaddref->ja_diradd); 3595 jaddref->ja_diradd = NULL; /* also clears ja_mkdir */ 3596 /* 3597 * Remove us from the inode list. 3598 */ 3599 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); 3600 /* 3601 * The mkdir may be waiting on the jaddref to clear before freeing. 3602 */ 3603 if (mkdir) { 3604 KASSERT(mkdir->md_list.wk_type == D_MKDIR, 3605 ("handle_written_jaddref: Incorrect type for mkdir %s", 3606 TYPENAME(mkdir->md_list.wk_type))); 3607 mkdir->md_jaddref = NULL; 3608 diradd = mkdir->md_diradd; 3609 mkdir->md_state |= DEPCOMPLETE; 3610 complete_mkdir(mkdir); 3611 } 3612 jwork_insert(&diradd->da_jwork, jsegdep); 3613 if (jaddref->ja_state & NEWBLOCK) { 3614 inodedep->id_state |= ONDEPLIST; 3615 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd, 3616 inodedep, id_deps); 3617 } 3618 free_jaddref(jaddref); 3619 } 3620 3621 /* 3622 * Called once a jnewblk journal is written. The allocdirect or allocindir 3623 * is placed in the bmsafemap to await notification of a written bitmap. If 3624 * the operation was canceled we add the segdep to the appropriate 3625 * dependency to free the journal space once the canceling operation 3626 * completes. 3627 */ 3628 static void 3629 handle_written_jnewblk(jnewblk) 3630 struct jnewblk *jnewblk; 3631 { 3632 struct bmsafemap *bmsafemap; 3633 struct freefrag *freefrag; 3634 struct freework *freework; 3635 struct jsegdep *jsegdep; 3636 struct newblk *newblk; 3637 3638 /* Grab the jsegdep. 
*/ 3639 jsegdep = jnewblk->jn_jsegdep; 3640 jnewblk->jn_jsegdep = NULL; 3641 if (jnewblk->jn_dep == NULL) 3642 panic("handle_written_jnewblk: No dependency for the segdep."); 3643 switch (jnewblk->jn_dep->wk_type) { 3644 case D_NEWBLK: 3645 case D_ALLOCDIRECT: 3646 case D_ALLOCINDIR: 3647 /* 3648 * Add the written block to the bmsafemap so it can 3649 * be notified when the bitmap is on disk. 3650 */ 3651 newblk = WK_NEWBLK(jnewblk->jn_dep); 3652 newblk->nb_jnewblk = NULL; 3653 if ((newblk->nb_state & GOINGAWAY) == 0) { 3654 bmsafemap = newblk->nb_bmsafemap; 3655 newblk->nb_state |= ONDEPLIST; 3656 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, 3657 nb_deps); 3658 } 3659 jwork_insert(&newblk->nb_jwork, jsegdep); 3660 break; 3661 case D_FREEFRAG: 3662 /* 3663 * A newblock being removed by a freefrag when replaced by 3664 * frag extension. 3665 */ 3666 freefrag = WK_FREEFRAG(jnewblk->jn_dep); 3667 freefrag->ff_jdep = NULL; 3668 jwork_insert(&freefrag->ff_jwork, jsegdep); 3669 break; 3670 case D_FREEWORK: 3671 /* 3672 * A direct block was removed by truncate. 3673 */ 3674 freework = WK_FREEWORK(jnewblk->jn_dep); 3675 freework->fw_jnewblk = NULL; 3676 jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep); 3677 break; 3678 default: 3679 panic("handle_written_jnewblk: Unknown type %d.", 3680 jnewblk->jn_dep->wk_type); 3681 } 3682 jnewblk->jn_dep = NULL; 3683 free_jnewblk(jnewblk); 3684 } 3685 3686 /* 3687 * Cancel a jfreefrag that won't be needed, probably due to colliding with 3688 * an in-flight allocation that has not yet been committed. Divorce us 3689 * from the freefrag and mark it DEPCOMPLETE so that it may be added 3690 * to the worklist. 3691 */ 3692 static void 3693 cancel_jfreefrag(jfreefrag) 3694 struct jfreefrag *jfreefrag; 3695 { 3696 struct freefrag *freefrag; 3697 3698 if (jfreefrag->fr_jsegdep) { 3699 free_jsegdep(jfreefrag->fr_jsegdep); 3700 jfreefrag->fr_jsegdep = NULL; 3701 } 3702 freefrag = jfreefrag->fr_freefrag; 3703 jfreefrag->fr_freefrag = NULL; 3704 free_jfreefrag(jfreefrag); 3705 freefrag->ff_state |= DEPCOMPLETE; 3706 CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno); 3707 } 3708 3709 /* 3710 * Free a jfreefrag when the parent freefrag is rendered obsolete. 3711 */ 3712 static void 3713 free_jfreefrag(jfreefrag) 3714 struct jfreefrag *jfreefrag; 3715 { 3716 3717 if (jfreefrag->fr_state & INPROGRESS) 3718 WORKLIST_REMOVE(&jfreefrag->fr_list); 3719 else if (jfreefrag->fr_state & ONWORKLIST) 3720 remove_from_journal(&jfreefrag->fr_list); 3721 if (jfreefrag->fr_freefrag != NULL) 3722 panic("free_jfreefrag: Still attached to a freefrag."); 3723 WORKITEM_FREE(jfreefrag, D_JFREEFRAG); 3724 } 3725 3726 /* 3727 * Called when the journal write for a jfreefrag completes. The parent 3728 * freefrag is added to the worklist if this completes its dependencies. 3729 */ 3730 static void 3731 handle_written_jfreefrag(jfreefrag) 3732 struct jfreefrag *jfreefrag; 3733 { 3734 struct jsegdep *jsegdep; 3735 struct freefrag *freefrag; 3736 3737 /* Grab the jsegdep. 
*/ 3738 jsegdep = jfreefrag->fr_jsegdep; 3739 jfreefrag->fr_jsegdep = NULL; 3740 freefrag = jfreefrag->fr_freefrag; 3741 if (freefrag == NULL) 3742 panic("handle_written_jfreefrag: No freefrag."); 3743 freefrag->ff_state |= DEPCOMPLETE; 3744 freefrag->ff_jdep = NULL; 3745 jwork_insert(&freefrag->ff_jwork, jsegdep); 3746 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 3747 add_to_worklist(&freefrag->ff_list, 0); 3748 jfreefrag->fr_freefrag = NULL; 3749 free_jfreefrag(jfreefrag); 3750 } 3751 3752 /* 3753 * Called when the journal write for a jfreeblk completes. The jfreeblk 3754 * is removed from the freeblks list of pending journal writes and the 3755 * jsegdep is moved to the freeblks jwork to be completed when all blocks 3756 * have been reclaimed. 3757 */ 3758 static void 3759 handle_written_jblkdep(jblkdep) 3760 struct jblkdep *jblkdep; 3761 { 3762 struct freeblks *freeblks; 3763 struct jsegdep *jsegdep; 3764 3765 /* Grab the jsegdep. */ 3766 jsegdep = jblkdep->jb_jsegdep; 3767 jblkdep->jb_jsegdep = NULL; 3768 freeblks = jblkdep->jb_freeblks; 3769 LIST_REMOVE(jblkdep, jb_deps); 3770 jwork_insert(&freeblks->fb_jwork, jsegdep); 3771 /* 3772 * If the freeblks is all journaled, we can add it to the worklist. 3773 */ 3774 if (LIST_EMPTY(&freeblks->fb_jblkdephd) && 3775 (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 3776 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 3777 3778 free_jblkdep(jblkdep); 3779 } 3780 3781 static struct jsegdep * 3782 newjsegdep(struct worklist *wk) 3783 { 3784 struct jsegdep *jsegdep; 3785 3786 jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS); 3787 workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp); 3788 jsegdep->jd_seg = NULL; 3789 3790 return (jsegdep); 3791 } 3792 3793 static struct jmvref * 3794 newjmvref(dp, ino, oldoff, newoff) 3795 struct inode *dp; 3796 ino_t ino; 3797 off_t oldoff; 3798 off_t newoff; 3799 { 3800 struct jmvref *jmvref; 3801 3802 jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS); 3803 workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump)); 3804 jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE; 3805 jmvref->jm_parent = dp->i_number; 3806 jmvref->jm_ino = ino; 3807 jmvref->jm_oldoff = oldoff; 3808 jmvref->jm_newoff = newoff; 3809 3810 return (jmvref); 3811 } 3812 3813 /* 3814 * Allocate a new jremref that tracks the removal of ip from dp with the 3815 * directory entry offset of diroff. Mark the entry as ATTACHED and 3816 * DEPCOMPLETE as we have all the information required for the journal write 3817 * and the directory has already been removed from the buffer. The caller 3818 * is responsible for linking the jremref into the pagedep and adding it 3819 * to the journal to write. The MKDIR_PARENT flag is set if we're doing 3820 * a DOTDOT addition so handle_workitem_remove() can properly assign 3821 * the jsegdep when we're done. 
3822 */ 3823 static struct jremref * 3824 newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip, 3825 off_t diroff, nlink_t nlink) 3826 { 3827 struct jremref *jremref; 3828 3829 jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS); 3830 workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump)); 3831 jremref->jr_state = ATTACHED; 3832 newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff, 3833 nlink, ip->i_mode); 3834 jremref->jr_dirrem = dirrem; 3835 3836 return (jremref); 3837 } 3838 3839 static inline void 3840 newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff, 3841 nlink_t nlink, uint16_t mode) 3842 { 3843 3844 inoref->if_jsegdep = newjsegdep(&inoref->if_list); 3845 inoref->if_diroff = diroff; 3846 inoref->if_ino = ino; 3847 inoref->if_parent = parent; 3848 inoref->if_nlink = nlink; 3849 inoref->if_mode = mode; 3850 } 3851 3852 /* 3853 * Allocate a new jaddref to track the addition of ino to dp at diroff. The 3854 * directory offset may not be known until later. The caller is responsible 3855 * for adding the entry to the journal when this information is available. 3856 * nlink should be the link count prior to the addition and mode is only 3857 * required to have the correct FMT. 3858 */ 3859 static struct jaddref * 3860 newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink, 3861 uint16_t mode) 3862 { 3863 struct jaddref *jaddref; 3864 3865 jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS); 3866 workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump)); 3867 jaddref->ja_state = ATTACHED; 3868 jaddref->ja_mkdir = NULL; 3869 newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode); 3870 3871 return (jaddref); 3872 } 3873 3874 /* 3875 * Create a new free dependency for a freework. The caller is responsible 3876 * for adjusting the reference count when it has the lock held. The freedep 3877 * will track an outstanding bitmap write that will ultimately clear the 3878 * freework to continue. 3879 */ 3880 static struct freedep * 3881 newfreedep(struct freework *freework) 3882 { 3883 struct freedep *freedep; 3884 3885 freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS); 3886 workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp); 3887 freedep->fd_freework = freework; 3888 3889 return (freedep); 3890 } 3891 3892 /* 3893 * Free a freedep structure once the buffer it is linked to is written. If 3894 * this is the last reference to the freework, schedule it for completion. 3895 */ 3896 static void 3897 free_freedep(freedep) 3898 struct freedep *freedep; 3899 { 3900 struct freework *freework; 3901 3902 freework = freedep->fd_freework; 3903 freework->fw_freeblks->fb_cgwait--; 3904 if (--freework->fw_ref == 0) 3905 freework_enqueue(freework); 3906 WORKITEM_FREE(freedep, D_FREEDEP); 3907 } 3908 3909 /* 3910 * Allocate a new freework structure that may be a level in an indirect 3911 * when parent is not NULL, or a top level block when it is. The top level 3912 * freework structures are allocated without lk held and before the freeblks 3913 * is visible outside of softdep_setup_freeblocks().
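 *
 * Illustrative arithmetic for the fw_ref initialization below (the
 * geometry is an example, not a requirement): on a UFS2 filesystem
 * with 32K blocks and 8-byte block pointers, NINDIR(fs) is
 * 32768 / 8 = 4096, so a journaled indirect level starts with
 * fw_ref = NINDIR(fs) + 1 = 4097, roughly one reference per pointer
 * slot plus one for the level itself, counted back down (see
 * free_freedep() above) as the dependent work completes.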
3914 */ 3915 static struct freework * 3916 newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal) 3917 struct ufsmount *ump; 3918 struct freeblks *freeblks; 3919 struct freework *parent; 3920 ufs_lbn_t lbn; 3921 ufs2_daddr_t nb; 3922 int frags; 3923 int off; 3924 int journal; 3925 { 3926 struct freework *freework; 3927 3928 freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS); 3929 workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp); 3930 freework->fw_state = ATTACHED; 3931 freework->fw_jnewblk = NULL; 3932 freework->fw_freeblks = freeblks; 3933 freework->fw_parent = parent; 3934 freework->fw_lbn = lbn; 3935 freework->fw_blkno = nb; 3936 freework->fw_frags = frags; 3937 freework->fw_indir = NULL; 3938 freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR) 3939 ? 0 : NINDIR(ump->um_fs) + 1; 3940 freework->fw_start = freework->fw_off = off; 3941 if (journal) 3942 newjfreeblk(freeblks, lbn, nb, frags); 3943 if (parent == NULL) { 3944 ACQUIRE_LOCK(&lk); 3945 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 3946 freeblks->fb_ref++; 3947 FREE_LOCK(&lk); 3948 } 3949 3950 return (freework); 3951 } 3952 3953 /* 3954 * Eliminate a jfreeblk for a block that does not need journaling. 3955 */ 3956 static void 3957 cancel_jfreeblk(freeblks, blkno) 3958 struct freeblks *freeblks; 3959 ufs2_daddr_t blkno; 3960 { 3961 struct jfreeblk *jfreeblk; 3962 struct jblkdep *jblkdep; 3963 3964 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) { 3965 if (jblkdep->jb_list.wk_type != D_JFREEBLK) 3966 continue; 3967 jfreeblk = WK_JFREEBLK(&jblkdep->jb_list); 3968 if (jfreeblk->jf_blkno == blkno) 3969 break; 3970 } 3971 if (jblkdep == NULL) 3972 return; 3973 CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno); 3974 free_jsegdep(jblkdep->jb_jsegdep); 3975 LIST_REMOVE(jblkdep, jb_deps); 3976 WORKITEM_FREE(jfreeblk, D_JFREEBLK); 3977 } 3978 3979 /* 3980 * Allocate a new jfreeblk to journal top level block pointer when truncating 3981 * a file. The caller must add this to the worklist when lk is held. 3982 */ 3983 static struct jfreeblk * 3984 newjfreeblk(freeblks, lbn, blkno, frags) 3985 struct freeblks *freeblks; 3986 ufs_lbn_t lbn; 3987 ufs2_daddr_t blkno; 3988 int frags; 3989 { 3990 struct jfreeblk *jfreeblk; 3991 3992 jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS); 3993 workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK, 3994 freeblks->fb_list.wk_mp); 3995 jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list); 3996 jfreeblk->jf_dep.jb_freeblks = freeblks; 3997 jfreeblk->jf_ino = freeblks->fb_inum; 3998 jfreeblk->jf_lbn = lbn; 3999 jfreeblk->jf_blkno = blkno; 4000 jfreeblk->jf_frags = frags; 4001 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps); 4002 4003 return (jfreeblk); 4004 } 4005 4006 /* 4007 * Allocate a new jtrunc to track a partial truncation. 
4008 */ 4009 static struct jtrunc * 4010 newjtrunc(freeblks, size, extsize) 4011 struct freeblks *freeblks; 4012 off_t size; 4013 int extsize; 4014 { 4015 struct jtrunc *jtrunc; 4016 4017 jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS); 4018 workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC, 4019 freeblks->fb_list.wk_mp); 4020 jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list); 4021 jtrunc->jt_dep.jb_freeblks = freeblks; 4022 jtrunc->jt_ino = freeblks->fb_inum; 4023 jtrunc->jt_size = size; 4024 jtrunc->jt_extsize = extsize; 4025 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps); 4026 4027 return (jtrunc); 4028 } 4029 4030 /* 4031 * If we're canceling a new bitmap we have to search for another ref 4032 * to move into the bmsafemap dep. This might be better expressed 4033 * with another structure. 4034 */ 4035 static void 4036 move_newblock_dep(jaddref, inodedep) 4037 struct jaddref *jaddref; 4038 struct inodedep *inodedep; 4039 { 4040 struct inoref *inoref; 4041 struct jaddref *jaddrefn; 4042 4043 jaddrefn = NULL; 4044 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4045 inoref = TAILQ_NEXT(inoref, if_deps)) { 4046 if ((jaddref->ja_state & NEWBLOCK) && 4047 inoref->if_list.wk_type == D_JADDREF) { 4048 jaddrefn = (struct jaddref *)inoref; 4049 break; 4050 } 4051 } 4052 if (jaddrefn == NULL) 4053 return; 4054 jaddrefn->ja_state &= ~(ATTACHED | UNDONE); 4055 jaddrefn->ja_state |= jaddref->ja_state & 4056 (ATTACHED | UNDONE | NEWBLOCK); 4057 jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK); 4058 jaddref->ja_state |= ATTACHED; 4059 LIST_REMOVE(jaddref, ja_bmdeps); 4060 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn, 4061 ja_bmdeps); 4062 } 4063 4064 /* 4065 * Cancel a jaddref either before it has been written or while it is being 4066 * written. This happens when a link is removed before the add reaches 4067 * the disk. The jaddref dependency is kept linked into the bmsafemap 4068 * and inode to prevent the link count or bitmap from reaching the disk 4069 * until handle_workitem_remove() re-adjusts the counts and bitmaps as 4070 * required. 4071 * 4072 * Returns 1 if the canceled addref requires journaling of the remove and 4073 * 0 otherwise. 4074 */ 4075 static int 4076 cancel_jaddref(jaddref, inodedep, wkhd) 4077 struct jaddref *jaddref; 4078 struct inodedep *inodedep; 4079 struct workhead *wkhd; 4080 { 4081 struct inoref *inoref; 4082 struct jsegdep *jsegdep; 4083 int needsj; 4084 4085 KASSERT((jaddref->ja_state & COMPLETE) == 0, 4086 ("cancel_jaddref: Canceling complete jaddref")); 4087 if (jaddref->ja_state & (INPROGRESS | COMPLETE)) 4088 needsj = 1; 4089 else 4090 needsj = 0; 4091 if (inodedep == NULL) 4092 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 4093 0, &inodedep) == 0) 4094 panic("cancel_jaddref: Lost inodedep"); 4095 /* 4096 * We must adjust the nlink of any reference operation that follows 4097 * us so that it is consistent with the in-memory reference. This 4098 * ensures that inode nlink rollbacks always have the correct link. 
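 *
 * A concrete example of the adjustment below: if three pending
 * jaddrefs record nlink values 1, 2 and 3 and the second is canceled
 * before its journal record was ever written (needsj == 0), the third
 * must drop to nlink 2 so that a rollback to any prefix of the
 * surviving operations still shows a consistent link count.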
4099 */ 4100 if (needsj == 0) { 4101 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4102 inoref = TAILQ_NEXT(inoref, if_deps)) { 4103 if (inoref->if_state & GOINGAWAY) 4104 break; 4105 inoref->if_nlink--; 4106 } 4107 } 4108 jsegdep = inoref_jseg(&jaddref->ja_ref); 4109 if (jaddref->ja_state & NEWBLOCK) 4110 move_newblock_dep(jaddref, inodedep); 4111 wake_worklist(&jaddref->ja_list); 4112 jaddref->ja_mkdir = NULL; 4113 if (jaddref->ja_state & INPROGRESS) { 4114 jaddref->ja_state &= ~INPROGRESS; 4115 WORKLIST_REMOVE(&jaddref->ja_list); 4116 jwork_insert(wkhd, jsegdep); 4117 } else { 4118 free_jsegdep(jsegdep); 4119 if (jaddref->ja_state & DEPCOMPLETE) 4120 remove_from_journal(&jaddref->ja_list); 4121 } 4122 jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE); 4123 /* 4124 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove 4125 * can arrange for them to be freed with the bitmap. Otherwise we 4126 * no longer need this addref attached to the inoreflst and it 4127 * will incorrectly adjust nlink if we leave it. 4128 */ 4129 if ((jaddref->ja_state & NEWBLOCK) == 0) { 4130 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 4131 if_deps); 4132 jaddref->ja_state |= COMPLETE; 4133 free_jaddref(jaddref); 4134 return (needsj); 4135 } 4136 /* 4137 * Leave the head of the list for jsegdeps for fast merging. 4138 */ 4139 if (LIST_FIRST(wkhd) != NULL) { 4140 jaddref->ja_state |= ONWORKLIST; 4141 LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list); 4142 } else 4143 WORKLIST_INSERT(wkhd, &jaddref->ja_list); 4144 4145 return (needsj); 4146 } 4147 4148 /* 4149 * Attempt to free a jaddref structure when some work completes. This 4150 * should only succeed once the entry is written and all dependencies have 4151 * been notified. 4152 */ 4153 static void 4154 free_jaddref(jaddref) 4155 struct jaddref *jaddref; 4156 { 4157 4158 if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE) 4159 return; 4160 if (jaddref->ja_ref.if_jsegdep) 4161 panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n", 4162 jaddref, jaddref->ja_state); 4163 if (jaddref->ja_state & NEWBLOCK) 4164 LIST_REMOVE(jaddref, ja_bmdeps); 4165 if (jaddref->ja_state & (INPROGRESS | ONWORKLIST)) 4166 panic("free_jaddref: Bad state %p(0x%X)", 4167 jaddref, jaddref->ja_state); 4168 if (jaddref->ja_mkdir != NULL) 4169 panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state); 4170 WORKITEM_FREE(jaddref, D_JADDREF); 4171 } 4172 4173 /* 4174 * Free a jremref structure once it has been written or discarded. 4175 */ 4176 static void 4177 free_jremref(jremref) 4178 struct jremref *jremref; 4179 { 4180 4181 if (jremref->jr_ref.if_jsegdep) 4182 free_jsegdep(jremref->jr_ref.if_jsegdep); 4183 if (jremref->jr_state & INPROGRESS) 4184 panic("free_jremref: IO still pending"); 4185 WORKITEM_FREE(jremref, D_JREMREF); 4186 } 4187 4188 /* 4189 * Free a jnewblk structure. 4190 */ 4191 static void 4192 free_jnewblk(jnewblk) 4193 struct jnewblk *jnewblk; 4194 { 4195 4196 if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE) 4197 return; 4198 LIST_REMOVE(jnewblk, jn_deps); 4199 if (jnewblk->jn_dep != NULL) 4200 panic("free_jnewblk: Dependency still attached."); 4201 WORKITEM_FREE(jnewblk, D_JNEWBLK); 4202 } 4203 4204 /* 4205 * Cancel a jnewblk which has been made redundant by frag extension.
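 *
 * For example, a two-fragment allocation later extended in place to
 * four fragments produces a second jnewblk for the same jn_blkno;
 * jnewblk_merge() (below) folds the older record into the newer one
 * and cancels the older here, with its jsegdep either moved onto wkhd
 * or freed outright so any journal space it pinned is released.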
4206 */ 4207 static void 4208 cancel_jnewblk(jnewblk, wkhd) 4209 struct jnewblk *jnewblk; 4210 struct workhead *wkhd; 4211 { 4212 struct jsegdep *jsegdep; 4213 4214 CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno); 4215 jsegdep = jnewblk->jn_jsegdep; 4216 if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL) 4217 panic("cancel_jnewblk: Invalid state"); 4218 jnewblk->jn_jsegdep = NULL; 4219 jnewblk->jn_dep = NULL; 4220 jnewblk->jn_state |= GOINGAWAY; 4221 if (jnewblk->jn_state & INPROGRESS) { 4222 jnewblk->jn_state &= ~INPROGRESS; 4223 WORKLIST_REMOVE(&jnewblk->jn_list); 4224 jwork_insert(wkhd, jsegdep); 4225 } else { 4226 free_jsegdep(jsegdep); 4227 remove_from_journal(&jnewblk->jn_list); 4228 } 4229 wake_worklist(&jnewblk->jn_list); 4230 WORKLIST_INSERT(wkhd, &jnewblk->jn_list); 4231 } 4232 4233 static void 4234 free_jblkdep(jblkdep) 4235 struct jblkdep *jblkdep; 4236 { 4237 4238 if (jblkdep->jb_list.wk_type == D_JFREEBLK) 4239 WORKITEM_FREE(jblkdep, D_JFREEBLK); 4240 else if (jblkdep->jb_list.wk_type == D_JTRUNC) 4241 WORKITEM_FREE(jblkdep, D_JTRUNC); 4242 else 4243 panic("free_jblkdep: Unexpected type %s", 4244 TYPENAME(jblkdep->jb_list.wk_type)); 4245 } 4246 4247 /* 4248 * Free a single jseg once it is no longer referenced in memory or on 4249 * disk. Reclaim journal blocks and dependencies waiting for the segment 4250 * to disappear. 4251 */ 4252 static void 4253 free_jseg(jseg, jblocks) 4254 struct jseg *jseg; 4255 struct jblocks *jblocks; 4256 { 4257 struct freework *freework; 4258 4259 /* 4260 * Free freework structures that were lingering to indicate freed 4261 * indirect blocks that forced journal write ordering on reallocate. 4262 */ 4263 while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL) 4264 indirblk_remove(freework); 4265 if (jblocks->jb_oldestseg == jseg) 4266 jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next); 4267 TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next); 4268 jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size); 4269 KASSERT(LIST_EMPTY(&jseg->js_entries), 4270 ("free_jseg: Freed jseg has valid entries.")); 4271 WORKITEM_FREE(jseg, D_JSEG); 4272 } 4273 4274 /* 4275 * Free all jsegs that meet the criteria for being reclaimed and update 4276 * oldestseg. 4277 */ 4278 static void 4279 free_jsegs(jblocks) 4280 struct jblocks *jblocks; 4281 { 4282 struct jseg *jseg; 4283 4284 /* 4285 * Free only those jsegs which have none allocated before them to 4286 * preserve the journal space ordering. 4287 */ 4288 while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) { 4289 /* 4290 * Only reclaim space when nothing depends on this journal 4291 * set and another set has written that it is no longer 4292 * valid. 4293 */ 4294 if (jseg->js_refs != 0) { 4295 jblocks->jb_oldestseg = jseg; 4296 return; 4297 } 4298 if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE) 4299 break; 4300 if (jseg->js_seq > jblocks->jb_oldestwrseq) 4301 break; 4302 /* 4303 * We can free jsegs that didn't write entries when 4304 * oldestwrseq == js_seq. 4305 */ 4306 if (jseg->js_seq == jblocks->jb_oldestwrseq && 4307 jseg->js_cnt != 0) 4308 break; 4309 free_jseg(jseg, jblocks); 4310 } 4311 /* 4312 * If we exited the loop above we still must discover the 4313 * oldest valid segment. 
4314 */ 4315 if (jseg) 4316 for (jseg = jblocks->jb_oldestseg; jseg != NULL; 4317 jseg = TAILQ_NEXT(jseg, js_next)) 4318 if (jseg->js_refs != 0) 4319 break; 4320 jblocks->jb_oldestseg = jseg; 4321 /* 4322 * The journal has no valid records but some jsegs may still be 4323 * waiting on oldestwrseq to advance. We force a small record 4324 * out to permit these lingering records to be reclaimed. 4325 */ 4326 if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs)) 4327 jblocks->jb_needseg = 1; 4328 } 4329 4330 /* 4331 * Release one reference to a jseg and free it if the count reaches 0. This 4332 * should eventually reclaim journal space as well. 4333 */ 4334 static void 4335 rele_jseg(jseg) 4336 struct jseg *jseg; 4337 { 4338 4339 KASSERT(jseg->js_refs > 0, 4340 ("rele_jseg: Invalid refcnt %d", jseg->js_refs)); 4341 if (--jseg->js_refs != 0) 4342 return; 4343 free_jsegs(jseg->js_jblocks); 4344 } 4345 4346 /* 4347 * Release a jsegdep and decrement the jseg count. 4348 */ 4349 static void 4350 free_jsegdep(jsegdep) 4351 struct jsegdep *jsegdep; 4352 { 4353 4354 if (jsegdep->jd_seg) 4355 rele_jseg(jsegdep->jd_seg); 4356 WORKITEM_FREE(jsegdep, D_JSEGDEP); 4357 } 4358 4359 /* 4360 * Wait for a journal item to make it to disk. Initiate journal processing 4361 * if required. 4362 */ 4363 static int 4364 jwait(wk, waitfor) 4365 struct worklist *wk; 4366 int waitfor; 4367 { 4368 4369 /* 4370 * Blocking journal waits cause slow synchronous behavior. Record 4371 * stats on the frequency of these blocking operations. 4372 */ 4373 if (waitfor == MNT_WAIT) { 4374 stat_journal_wait++; 4375 switch (wk->wk_type) { 4376 case D_JREMREF: 4377 case D_JMVREF: 4378 stat_jwait_filepage++; 4379 break; 4380 case D_JTRUNC: 4381 case D_JFREEBLK: 4382 stat_jwait_freeblks++; 4383 break; 4384 case D_JNEWBLK: 4385 stat_jwait_newblk++; 4386 break; 4387 case D_JADDREF: 4388 stat_jwait_inode++; 4389 break; 4390 default: 4391 break; 4392 } 4393 } 4394 /* 4395 * If IO has not started we process the journal. We can't mark the 4396 * worklist item as IOWAITING because we drop the lock while 4397 * processing the journal and the worklist entry may be freed after 4398 * this point. The caller may call back in and re-issue the request. 4399 */ 4400 if ((wk->wk_state & INPROGRESS) == 0) { 4401 softdep_process_journal(wk->wk_mp, wk, waitfor); 4402 if (waitfor != MNT_WAIT) 4403 return (EBUSY); 4404 return (0); 4405 } 4406 if (waitfor != MNT_WAIT) 4407 return (EBUSY); 4408 wait_worklist(wk, "jwait"); 4409 return (0); 4410 } 4411 4412 /* 4413 * Look up an inodedep based on an inode pointer and set the nlinkdelta as 4414 * appropriate. This is a convenience function to reduce duplicate code 4415 * for the setup and revert functions below. 4416 */ 4417 static struct inodedep * 4418 inodedep_lookup_ip(ip) 4419 struct inode *ip; 4420 { 4421 struct inodedep *inodedep; 4422 int dflags; 4423 4424 KASSERT(ip->i_nlink >= ip->i_effnlink, 4425 ("inodedep_lookup_ip: bad delta")); 4426 dflags = DEPALLOC; 4427 if (IS_SNAPSHOT(ip)) 4428 dflags |= NODELAY; 4429 (void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, 4430 &inodedep); 4431 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 4432 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 4433 4434 return (inodedep); 4435 } 4436 4437 /* 4438 * Called prior to creating a new inode and linking it to a directory.
The 4439 * jaddref structure must already be allocated by softdep_setup_inomapdep 4440 * and it is discovered here so we can initialize the mode and update 4441 * nlinkdelta. 4442 */ 4443 void 4444 softdep_setup_create(dp, ip) 4445 struct inode *dp; 4446 struct inode *ip; 4447 { 4448 struct inodedep *inodedep; 4449 struct jaddref *jaddref; 4450 struct vnode *dvp; 4451 4452 KASSERT(ip->i_nlink == 1, 4453 ("softdep_setup_create: Invalid link count.")); 4454 dvp = ITOV(dp); 4455 ACQUIRE_LOCK(&lk); 4456 inodedep = inodedep_lookup_ip(ip); 4457 if (DOINGSUJ(dvp)) { 4458 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4459 inoreflst); 4460 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 4461 ("softdep_setup_create: No addref structure present.")); 4462 } 4463 softdep_prelink(dvp, NULL); 4464 FREE_LOCK(&lk); 4465 } 4466 4467 /* 4468 * Create a jaddref structure to track the addition of a DOTDOT link when 4469 * we are reparenting an inode as part of a rename. This jaddref will be 4470 * found by softdep_setup_directory_change. Adjusts nlinkdelta for 4471 * non-journaling softdep. 4472 */ 4473 void 4474 softdep_setup_dotdot_link(dp, ip) 4475 struct inode *dp; 4476 struct inode *ip; 4477 { 4478 struct inodedep *inodedep; 4479 struct jaddref *jaddref; 4480 struct vnode *dvp; 4481 struct vnode *vp; 4482 4483 dvp = ITOV(dp); 4484 vp = ITOV(ip); 4485 jaddref = NULL; 4486 /* 4487 * We don't set MKDIR_PARENT as this is not tied to a mkdir and 4488 * is used as a normal link would be. 4489 */ 4490 if (DOINGSUJ(dvp)) 4491 jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4492 dp->i_effnlink - 1, dp->i_mode); 4493 ACQUIRE_LOCK(&lk); 4494 inodedep = inodedep_lookup_ip(dp); 4495 if (jaddref) 4496 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4497 if_deps); 4498 softdep_prelink(dvp, ITOV(ip)); 4499 FREE_LOCK(&lk); 4500 } 4501 4502 /* 4503 * Create a jaddref structure to track a new link to an inode. The directory 4504 * offset is not known until softdep_setup_directory_add or 4505 * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling 4506 * softdep. 4507 */ 4508 void 4509 softdep_setup_link(dp, ip) 4510 struct inode *dp; 4511 struct inode *ip; 4512 { 4513 struct inodedep *inodedep; 4514 struct jaddref *jaddref; 4515 struct vnode *dvp; 4516 4517 dvp = ITOV(dp); 4518 jaddref = NULL; 4519 if (DOINGSUJ(dvp)) 4520 jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1, 4521 ip->i_mode); 4522 ACQUIRE_LOCK(&lk); 4523 inodedep = inodedep_lookup_ip(ip); 4524 if (jaddref) 4525 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4526 if_deps); 4527 softdep_prelink(dvp, ITOV(ip)); 4528 FREE_LOCK(&lk); 4529 } 4530 4531 /* 4532 * Called to create the jaddref structures to track . and .. references as 4533 * well as lookup and further initialize the incomplete jaddref created 4534 * by softdep_setup_inomapdep when the inode was allocated. Adjusts 4535 * nlinkdelta for non-journaling softdep. 
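 *
 * For a journaled mkdir the code below leaves three addrefs queued: on
 * the new directory's id_inoreflst, the "." addref (MKDIR_BODY)
 * inserted just ahead of the incomplete addref created by
 * softdep_setup_inomapdep, and on the parent's id_inoreflst, the ".."
 * addref (MKDIR_PARENT) appended at the tail.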
4536 */ 4537 void 4538 softdep_setup_mkdir(dp, ip) 4539 struct inode *dp; 4540 struct inode *ip; 4541 { 4542 struct inodedep *inodedep; 4543 struct jaddref *dotdotaddref; 4544 struct jaddref *dotaddref; 4545 struct jaddref *jaddref; 4546 struct vnode *dvp; 4547 4548 dvp = ITOV(dp); 4549 dotaddref = dotdotaddref = NULL; 4550 if (DOINGSUJ(dvp)) { 4551 dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1, 4552 ip->i_mode); 4553 dotaddref->ja_state |= MKDIR_BODY; 4554 dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4555 dp->i_effnlink - 1, dp->i_mode); 4556 dotdotaddref->ja_state |= MKDIR_PARENT; 4557 } 4558 ACQUIRE_LOCK(&lk); 4559 inodedep = inodedep_lookup_ip(ip); 4560 if (DOINGSUJ(dvp)) { 4561 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4562 inoreflst); 4563 KASSERT(jaddref != NULL, 4564 ("softdep_setup_mkdir: No addref structure present.")); 4565 KASSERT(jaddref->ja_parent == dp->i_number, 4566 ("softdep_setup_mkdir: bad parent %ju", 4567 (uintmax_t)jaddref->ja_parent)); 4568 TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref, 4569 if_deps); 4570 } 4571 inodedep = inodedep_lookup_ip(dp); 4572 if (DOINGSUJ(dvp)) 4573 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, 4574 &dotdotaddref->ja_ref, if_deps); 4575 softdep_prelink(ITOV(dp), NULL); 4576 FREE_LOCK(&lk); 4577 } 4578 4579 /* 4580 * Called to track nlinkdelta of the inode and parent directories prior to 4581 * unlinking a directory. 4582 */ 4583 void 4584 softdep_setup_rmdir(dp, ip) 4585 struct inode *dp; 4586 struct inode *ip; 4587 { 4588 struct vnode *dvp; 4589 4590 dvp = ITOV(dp); 4591 ACQUIRE_LOCK(&lk); 4592 (void) inodedep_lookup_ip(ip); 4593 (void) inodedep_lookup_ip(dp); 4594 softdep_prelink(dvp, ITOV(ip)); 4595 FREE_LOCK(&lk); 4596 } 4597 4598 /* 4599 * Called to track nlinkdelta of the inode and parent directories prior to 4600 * unlink. 4601 */ 4602 void 4603 softdep_setup_unlink(dp, ip) 4604 struct inode *dp; 4605 struct inode *ip; 4606 { 4607 struct vnode *dvp; 4608 4609 dvp = ITOV(dp); 4610 ACQUIRE_LOCK(&lk); 4611 (void) inodedep_lookup_ip(ip); 4612 (void) inodedep_lookup_ip(dp); 4613 softdep_prelink(dvp, ITOV(ip)); 4614 FREE_LOCK(&lk); 4615 } 4616 4617 /* 4618 * Called to release the journal structures created by a failed non-directory 4619 * creation. Adjusts nlinkdelta for non-journaling softdep. 4620 */ 4621 void 4622 softdep_revert_create(dp, ip) 4623 struct inode *dp; 4624 struct inode *ip; 4625 { 4626 struct inodedep *inodedep; 4627 struct jaddref *jaddref; 4628 struct vnode *dvp; 4629 4630 dvp = ITOV(dp); 4631 ACQUIRE_LOCK(&lk); 4632 inodedep = inodedep_lookup_ip(ip); 4633 if (DOINGSUJ(dvp)) { 4634 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4635 inoreflst); 4636 KASSERT(jaddref->ja_parent == dp->i_number, 4637 ("softdep_revert_create: addref parent mismatch")); 4638 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4639 } 4640 FREE_LOCK(&lk); 4641 } 4642 4643 /* 4644 * Called to release the journal structures created by a failed dotdot link 4645 * creation. Adjusts nlinkdelta for non-journaling softdep. 
4646 */ 4647 void 4648 softdep_revert_dotdot_link(dp, ip) 4649 struct inode *dp; 4650 struct inode *ip; 4651 { 4652 struct inodedep *inodedep; 4653 struct jaddref *jaddref; 4654 struct vnode *dvp; 4655 4656 dvp = ITOV(dp); 4657 ACQUIRE_LOCK(&lk); 4658 inodedep = inodedep_lookup_ip(dp); 4659 if (DOINGSUJ(dvp)) { 4660 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4661 inoreflst); 4662 KASSERT(jaddref->ja_parent == ip->i_number, 4663 ("softdep_revert_dotdot_link: addref parent mismatch")); 4664 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4665 } 4666 FREE_LOCK(&lk); 4667 } 4668 4669 /* 4670 * Called to release the journal structures created by a failed link 4671 * addition. Adjusts nlinkdelta for non-journaling softdep. 4672 */ 4673 void 4674 softdep_revert_link(dp, ip) 4675 struct inode *dp; 4676 struct inode *ip; 4677 { 4678 struct inodedep *inodedep; 4679 struct jaddref *jaddref; 4680 struct vnode *dvp; 4681 4682 dvp = ITOV(dp); 4683 ACQUIRE_LOCK(&lk); 4684 inodedep = inodedep_lookup_ip(ip); 4685 if (DOINGSUJ(dvp)) { 4686 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4687 inoreflst); 4688 KASSERT(jaddref->ja_parent == dp->i_number, 4689 ("softdep_revert_link: addref parent mismatch")); 4690 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4691 } 4692 FREE_LOCK(&lk); 4693 } 4694 4695 /* 4696 * Called to release the journal structures created by a failed mkdir 4697 * attempt. Adjusts nlinkdelta for non-journaling softdep. 4698 */ 4699 void 4700 softdep_revert_mkdir(dp, ip) 4701 struct inode *dp; 4702 struct inode *ip; 4703 { 4704 struct inodedep *inodedep; 4705 struct jaddref *jaddref; 4706 struct jaddref *dotaddref; 4707 struct vnode *dvp; 4708 4709 dvp = ITOV(dp); 4710 4711 ACQUIRE_LOCK(&lk); 4712 inodedep = inodedep_lookup_ip(dp); 4713 if (DOINGSUJ(dvp)) { 4714 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4715 inoreflst); 4716 KASSERT(jaddref->ja_parent == ip->i_number, 4717 ("softdep_revert_mkdir: dotdot addref parent mismatch")); 4718 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4719 } 4720 inodedep = inodedep_lookup_ip(ip); 4721 if (DOINGSUJ(dvp)) { 4722 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4723 inoreflst); 4724 KASSERT(jaddref->ja_parent == dp->i_number, 4725 ("softdep_revert_mkdir: addref parent mismatch")); 4726 dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 4727 inoreflst, if_deps); 4728 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4729 KASSERT(dotaddref->ja_parent == ip->i_number, 4730 ("softdep_revert_mkdir: dot addref parent mismatch")); 4731 cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait); 4732 } 4733 FREE_LOCK(&lk); 4734 } 4735 4736 /* 4737 * Called to correct nlinkdelta after a failed rmdir. 4738 */ 4739 void 4740 softdep_revert_rmdir(dp, ip) 4741 struct inode *dp; 4742 struct inode *ip; 4743 { 4744 4745 ACQUIRE_LOCK(&lk); 4746 (void) inodedep_lookup_ip(ip); 4747 (void) inodedep_lookup_ip(dp); 4748 FREE_LOCK(&lk); 4749 } 4750 4751 /* 4752 * Protecting the freemaps (or bitmaps). 4753 * 4754 * To eliminate the need to execute fsck before mounting a filesystem 4755 * after a power failure, one must (conservatively) guarantee that the 4756 * on-disk copy of the bitmaps never indicate that a live inode or block is 4757 * free. So, when a block or inode is allocated, the bitmap should be 4758 * updated (on disk) before any new pointers. 
When a block or inode is 4759 * freed, the bitmap should not be updated until all pointers have been 4760 * reset. The latter dependency is handled by the delayed de-allocation 4761 * approach described below for block and inode de-allocation. The former 4762 * dependency is handled by calling the following procedure when a block or 4763 * inode is allocated. When an inode is allocated, an "inodedep" is created 4764 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 4765 * Each "inodedep" is also inserted into the hash indexing structure so 4766 * that any additional link additions can be made dependent on the inode 4767 * allocation. 4768 * 4769 * The ufs filesystem maintains a number of free block counts (e.g., per 4770 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 4771 * in addition to the bitmaps. These counts are used to improve efficiency 4772 * during allocation and therefore must be consistent with the bitmaps. 4773 * There is no convenient way to guarantee post-crash consistency of these 4774 * counts with simple update ordering, for two main reasons: (1) The counts 4775 * and bitmaps for a single cylinder group block are not in the same disk 4776 * sector. If a disk write is interrupted (e.g., by power failure), one may 4777 * be written and the other not. (2) Some of the counts are located in the 4778 * superblock rather than the cylinder group block. So, we focus our soft 4779 * updates implementation on protecting the bitmaps. When mounting a 4780 * filesystem, we recompute the auxiliary counts from the bitmaps. 4781 */ 4782 4783 /* 4784 * Called just after updating the cylinder group block to allocate an inode. 4785 */ 4786 void 4787 softdep_setup_inomapdep(bp, ip, newinum, mode) 4788 struct buf *bp; /* buffer for cylgroup block with inode map */ 4789 struct inode *ip; /* inode related to allocation */ 4790 ino_t newinum; /* new inode number being allocated */ 4791 int mode; 4792 { 4793 struct inodedep *inodedep; 4794 struct bmsafemap *bmsafemap; 4795 struct jaddref *jaddref; 4796 struct mount *mp; 4797 struct fs *fs; 4798 4799 mp = UFSTOVFS(ip->i_ump); 4800 fs = ip->i_ump->um_fs; 4801 jaddref = NULL; 4802 4803 /* 4804 * Allocate the journal reference add structure so that the bitmap 4805 * can be dependent on it. 4806 */ 4807 if (MOUNTEDSUJ(mp)) { 4808 jaddref = newjaddref(ip, newinum, 0, 0, mode); 4809 jaddref->ja_state |= NEWBLOCK; 4810 } 4811 4812 /* 4813 * Create a dependency for the newly allocated inode. 4814 * Panic if it already exists as something is seriously wrong. 4815 * Otherwise add it to the dependency list for the buffer holding 4816 * the cylinder group map from which it was allocated. 4817 * 4818 * We have to preallocate a bmsafemap entry in case it is needed 4819 * in bmsafemap_lookup since once we allocate the inodedep, we 4820 * have to finish initializing it before we can FREE_LOCK(). 4821 * By preallocating, we avoid FREE_LOCK() while doing a malloc 4822 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before 4823 * creating the inodedep as it can be freed during the time 4824 * that we FREE_LOCK() while allocating the inodedep. We must 4825 * call workitem_alloc() before entering the locked section as 4826 * it also acquires the lock and we must avoid trying to do so 4827 * recursively.
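 *
 * The shape of that pattern, reduced to a sketch (error paths and the
 * journal hooks omitted; this mirrors the code that follows rather
 * than adding to it):
 *
 *	bmsafemap = malloc(sizeof(struct bmsafemap), M_BMSAFEMAP,
 *	    M_SOFTDEP_FLAGS);
 *	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
 *	ACQUIRE_LOCK(&lk);
 *	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
 *	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum),
 *	    bmsafemap);
 *	...
 *	FREE_LOCK(&lk);
 *
 * bmsafemap_lookup() consumes the preallocated entry only when no
 * bmsafemap exists yet for the cylinder group; otherwise it frees the
 * spare, so the lock is never dropped mid-initialization.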
4828 */ 4829 bmsafemap = malloc(sizeof(struct bmsafemap), 4830 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 4831 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 4832 ACQUIRE_LOCK(&lk); 4833 if (inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep)) 4834 panic("softdep_setup_inomapdep: dependency %p for new " 4835 "inode already exists", inodedep); 4836 bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap); 4837 if (jaddref) { 4838 LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps); 4839 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4840 if_deps); 4841 } else { 4842 inodedep->id_state |= ONDEPLIST; 4843 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); 4844 } 4845 inodedep->id_bmsafemap = bmsafemap; 4846 inodedep->id_state &= ~DEPCOMPLETE; 4847 FREE_LOCK(&lk); 4848 } 4849 4850 /* 4851 * Called just after updating the cylinder group block to 4852 * allocate block or fragment. 4853 */ 4854 void 4855 softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags) 4856 struct buf *bp; /* buffer for cylgroup block with block map */ 4857 struct mount *mp; /* filesystem doing allocation */ 4858 ufs2_daddr_t newblkno; /* number of newly allocated block */ 4859 int frags; /* Number of fragments. */ 4860 int oldfrags; /* Previous number of fragments for extend. */ 4861 { 4862 struct newblk *newblk; 4863 struct bmsafemap *bmsafemap; 4864 struct jnewblk *jnewblk; 4865 struct fs *fs; 4866 4867 fs = VFSTOUFS(mp)->um_fs; 4868 jnewblk = NULL; 4869 /* 4870 * Create a dependency for the newly allocated block. 4871 * Add it to the dependency list for the buffer holding 4872 * the cylinder group map from which it was allocated. 4873 */ 4874 if (MOUNTEDSUJ(mp)) { 4875 jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS); 4876 workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp); 4877 jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list); 4878 jnewblk->jn_state = ATTACHED; 4879 jnewblk->jn_blkno = newblkno; 4880 jnewblk->jn_frags = frags; 4881 jnewblk->jn_oldfrags = oldfrags; 4882 #ifdef SUJ_DEBUG 4883 { 4884 struct cg *cgp; 4885 uint8_t *blksfree; 4886 long bno; 4887 int i; 4888 4889 cgp = (struct cg *)bp->b_data; 4890 blksfree = cg_blksfree(cgp); 4891 bno = dtogd(fs, jnewblk->jn_blkno); 4892 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; 4893 i++) { 4894 if (isset(blksfree, bno + i)) 4895 panic("softdep_setup_blkmapdep: " 4896 "free fragment %d from %d-%d " 4897 "state 0x%X dep %p", i, 4898 jnewblk->jn_oldfrags, 4899 jnewblk->jn_frags, 4900 jnewblk->jn_state, 4901 jnewblk->jn_dep); 4902 } 4903 } 4904 #endif 4905 } 4906 4907 CTR3(KTR_SUJ, 4908 "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d", 4909 newblkno, frags, oldfrags); 4910 ACQUIRE_LOCK(&lk); 4911 if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0) 4912 panic("softdep_setup_blkmapdep: found block"); 4913 bmsafemap = bmsafemap_lookup(mp, bp, 4914 dtog(fs, newblkno), NULL); 4915 if (jnewblk) { 4916 jnewblk->jn_dep = (struct worklist *)newblk; 4917 LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps); 4918 } else { 4919 newblk->nb_state |= ONDEPLIST; 4920 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 4921 } 4922 newblk->nb_bmsafemap = bmsafemap; 4923 newblk->nb_jnewblk = jnewblk; 4924 FREE_LOCK(&lk); 4925 } 4926 4927 #define BMSAFEMAP_HASH(fs, cg) \ 4928 (&bmsafemap_hashtbl[((((register_t)(fs)) >> 13) + (cg)) & bmsafemap_hash]) 4929 4930 static int 4931 bmsafemap_find(bmsafemaphd, mp, cg, bmsafemapp) 4932 struct bmsafemap_hashhead
*bmsafemaphd; 4933 struct mount *mp; 4934 int cg; 4935 struct bmsafemap **bmsafemapp; 4936 { 4937 struct bmsafemap *bmsafemap; 4938 4939 LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash) 4940 if (bmsafemap->sm_list.wk_mp == mp && bmsafemap->sm_cg == cg) 4941 break; 4942 if (bmsafemap) { 4943 *bmsafemapp = bmsafemap; 4944 return (1); 4945 } 4946 *bmsafemapp = NULL; 4947 4948 return (0); 4949 } 4950 4951 /* 4952 * Find the bmsafemap associated with a cylinder group buffer. 4953 * If none exists, create one. The buffer must be locked when 4954 * this routine is called and this routine must be called with 4955 * the softdep lock held. To avoid giving up the lock while 4956 * allocating a new bmsafemap, a preallocated bmsafemap may be 4957 * provided. If it is provided but not needed, it is freed. 4958 */ 4959 static struct bmsafemap * 4960 bmsafemap_lookup(mp, bp, cg, newbmsafemap) 4961 struct mount *mp; 4962 struct buf *bp; 4963 int cg; 4964 struct bmsafemap *newbmsafemap; 4965 { 4966 struct bmsafemap_hashhead *bmsafemaphd; 4967 struct bmsafemap *bmsafemap, *collision; 4968 struct worklist *wk; 4969 struct fs *fs; 4970 4971 mtx_assert(&lk, MA_OWNED); 4972 if (bp) 4973 LIST_FOREACH(wk, &bp->b_dep, wk_list) 4974 if (wk->wk_type == D_BMSAFEMAP) { 4975 if (newbmsafemap) 4976 WORKITEM_FREE(newbmsafemap,D_BMSAFEMAP); 4977 return (WK_BMSAFEMAP(wk)); 4978 } 4979 fs = VFSTOUFS(mp)->um_fs; 4980 bmsafemaphd = BMSAFEMAP_HASH(fs, cg); 4981 if (bmsafemap_find(bmsafemaphd, mp, cg, &bmsafemap) == 1) { 4982 if (newbmsafemap) 4983 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 4984 return (bmsafemap); 4985 } 4986 if (newbmsafemap) { 4987 bmsafemap = newbmsafemap; 4988 } else { 4989 FREE_LOCK(&lk); 4990 bmsafemap = malloc(sizeof(struct bmsafemap), 4991 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 4992 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 4993 ACQUIRE_LOCK(&lk); 4994 } 4995 bmsafemap->sm_buf = bp; 4996 LIST_INIT(&bmsafemap->sm_inodedephd); 4997 LIST_INIT(&bmsafemap->sm_inodedepwr); 4998 LIST_INIT(&bmsafemap->sm_newblkhd); 4999 LIST_INIT(&bmsafemap->sm_newblkwr); 5000 LIST_INIT(&bmsafemap->sm_jaddrefhd); 5001 LIST_INIT(&bmsafemap->sm_jnewblkhd); 5002 LIST_INIT(&bmsafemap->sm_freehd); 5003 LIST_INIT(&bmsafemap->sm_freewr); 5004 if (bmsafemap_find(bmsafemaphd, mp, cg, &collision) == 1) { 5005 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 5006 return (collision); 5007 } 5008 bmsafemap->sm_cg = cg; 5009 LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash); 5010 LIST_INSERT_HEAD(&VFSTOUFS(mp)->softdep_dirtycg, bmsafemap, sm_next); 5011 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); 5012 return (bmsafemap); 5013 } 5014 5015 /* 5016 * Direct block allocation dependencies. 5017 * 5018 * When a new block is allocated, the corresponding disk locations must be 5019 * initialized (with zeros or new data) before the on-disk inode points to 5020 * them. Also, the freemap from which the block was allocated must be 5021 * updated (on disk) before the inode's pointer. These two dependencies are 5022 * independent of each other and are needed for all file blocks and indirect 5023 * blocks that are pointed to directly by the inode. Just before the 5024 * "in-core" version of the inode is updated with a newly allocated block 5025 * number, a procedure (below) is called to setup allocation dependency 5026 * structures. 
These structures are removed when the corresponding 5027 * dependencies are satisfied or when the block allocation becomes obsolete 5028 * (i.e., the file is deleted, the block is de-allocated, or the block is a 5029 * fragment that gets upgraded). All of these cases are handled in 5030 * procedures described later. 5031 * 5032 * When a file extension causes a fragment to be upgraded, either to a larger 5033 * fragment or to a full block, the on-disk location may change (if the 5034 * previous fragment could not simply be extended). In this case, the old 5035 * fragment must be de-allocated, but not until after the inode's pointer has 5036 * been updated. In most cases, this is handled by later procedures, which 5037 * will construct a "freefrag" structure to be added to the workitem queue 5038 * when the inode update is complete (or obsolete). The main exception to 5039 * this is when an allocation occurs while a pending allocation dependency 5040 * (for the same block pointer) remains. This case is handled in the main 5041 * allocation dependency setup procedure by immediately freeing the 5042 * unreferenced fragments. 5043 */ 5044 void 5045 softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp) 5046 struct inode *ip; /* inode to which block is being added */ 5047 ufs_lbn_t off; /* block pointer within inode */ 5048 ufs2_daddr_t newblkno; /* disk block number being added */ 5049 ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */ 5050 long newsize; /* size of new block */ 5051 long oldsize; /* size of old block */ 5052 struct buf *bp; /* bp for allocated block */ 5053 { 5054 struct allocdirect *adp, *oldadp; 5055 struct allocdirectlst *adphead; 5056 struct freefrag *freefrag; 5057 struct inodedep *inodedep; 5058 struct pagedep *pagedep; 5059 struct jnewblk *jnewblk; 5060 struct newblk *newblk; 5061 struct mount *mp; 5062 ufs_lbn_t lbn; 5063 5064 lbn = bp->b_lblkno; 5065 mp = UFSTOVFS(ip->i_ump); 5066 if (oldblkno && oldblkno != newblkno) 5067 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); 5068 else 5069 freefrag = NULL; 5070 5071 CTR6(KTR_SUJ, 5072 "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd " 5073 "off %jd newsize %ld oldsize %ld", 5074 ip->i_number, newblkno, oldblkno, off, newsize, oldsize); 5075 ACQUIRE_LOCK(&lk); 5076 if (off >= NDADDR) { 5077 if (lbn > 0) 5078 panic("softdep_setup_allocdirect: bad lbn %jd, off %jd", 5079 lbn, off); 5080 /* allocating an indirect block */ 5081 if (oldblkno != 0) 5082 panic("softdep_setup_allocdirect: non-zero indir"); 5083 } else { 5084 if (off != lbn) 5085 panic("softdep_setup_allocdirect: lbn %jd != off %jd", 5086 lbn, off); 5087 /* 5088 * Allocating a direct block. 5089 * 5090 * If we are allocating a directory block, then we must 5091 * allocate an associated pagedep to track additions and 5092 * deletions. 5093 */ 5094 if ((ip->i_mode & IFMT) == IFDIR) 5095 pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC, 5096 &pagedep); 5097 } 5098 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) 5099 panic("softdep_setup_allocdirect: lost block"); 5100 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5101 ("softdep_setup_allocdirect: newblk already initialized")); 5102 /* 5103 * Convert the newblk to an allocdirect.
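 *
 * The conversion below works in place: struct allocdirect embeds its
 * newblk as the first member (ad_block), so the cast that follows
 * merely widens the view of the same allocation, and the remaining
 * ad_* fields can simply be filled in afterwards. Only the wk_type
 * switch to D_ALLOCDIRECT changes how the work item is interpreted
 * from here on.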
5104 */ 5105 newblk->nb_list.wk_type = D_ALLOCDIRECT; 5106 adp = (struct allocdirect *)newblk; 5107 newblk->nb_freefrag = freefrag; 5108 adp->ad_offset = off; 5109 adp->ad_oldblkno = oldblkno; 5110 adp->ad_newsize = newsize; 5111 adp->ad_oldsize = oldsize; 5112 5113 /* 5114 * Finish initializing the journal. 5115 */ 5116 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5117 jnewblk->jn_ino = ip->i_number; 5118 jnewblk->jn_lbn = lbn; 5119 add_to_journal(&jnewblk->jn_list); 5120 } 5121 if (freefrag && freefrag->ff_jdep != NULL && 5122 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5123 add_to_journal(freefrag->ff_jdep); 5124 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep); 5125 adp->ad_inodedep = inodedep; 5126 5127 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5128 /* 5129 * The list of allocdirects must be kept in sorted and ascending 5130 * order so that the rollback routines can quickly determine the 5131 * first uncommitted block (the size of the file stored on disk 5132 * ends at the end of the lowest committed fragment, or if there 5133 * are no fragments, at the end of the highest committed block). 5134 * Since files generally grow, the typical case is that the new 5135 * block is to be added at the end of the list. We speed this 5136 * special case by checking against the last allocdirect in the 5137 * list before laboriously traversing the list looking for the 5138 * insertion point. 5139 */ 5140 adphead = &inodedep->id_newinoupdt; 5141 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5142 if (oldadp == NULL || oldadp->ad_offset <= off) { 5143 /* insert at end of list */ 5144 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5145 if (oldadp != NULL && oldadp->ad_offset == off) 5146 allocdirect_merge(adphead, adp, oldadp); 5147 FREE_LOCK(&lk); 5148 return; 5149 } 5150 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5151 if (oldadp->ad_offset >= off) 5152 break; 5153 } 5154 if (oldadp == NULL) 5155 panic("softdep_setup_allocdirect: lost entry"); 5156 /* insert in middle of list */ 5157 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5158 if (oldadp->ad_offset == off) 5159 allocdirect_merge(adphead, adp, oldadp); 5160 5161 FREE_LOCK(&lk); 5162 } 5163 5164 /* 5165 * Merge a newer and older journal record to be stored either in a 5166 * newblock or freefrag. This handles aggregating journal records for 5167 * fragment allocation into a second record as well as replacing a 5168 * journal free with an aborted journal allocation. A segment for the 5169 * oldest record will be placed on wkhd if it has been written. If not 5170 * the segment for the newer record will suffice. 5171 */ 5172 static struct worklist * 5173 jnewblk_merge(new, old, wkhd) 5174 struct worklist *new; 5175 struct worklist *old; 5176 struct workhead *wkhd; 5177 { 5178 struct jnewblk *njnewblk; 5179 struct jnewblk *jnewblk; 5180 5181 /* Handle NULLs to simplify callers. */ 5182 if (new == NULL) 5183 return (old); 5184 if (old == NULL) 5185 return (new); 5186 /* Replace a jfreefrag with a jnewblk. */ 5187 if (new->wk_type == D_JFREEFRAG) { 5188 if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno) 5189 panic("jnewblk_merge: blkno mismatch: %p, %p", 5190 old, new); 5191 cancel_jfreefrag(WK_JFREEFRAG(new)); 5192 return (old); 5193 } 5194 if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK) 5195 panic("jnewblk_merge: Bad type: old %d new %d\n", 5196 old->wk_type, new->wk_type); 5197 /* 5198 * Handle merging of two jnewblk records that describe 5199 * different sets of fragments in the same block. 
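 *
 * Concretely (the fragment counts are illustrative): merging an older
 * record with jn_oldfrags 0 and jn_frags 2 into a newer extension
 * record with jn_oldfrags 2 and jn_frags 4 leaves the newer record
 * with jn_oldfrags 0 and jn_frags 4, so a single journal entry
 * describes the whole allocated range.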
5200 */ 5201 jnewblk = WK_JNEWBLK(old); 5202 njnewblk = WK_JNEWBLK(new); 5203 if (jnewblk->jn_blkno != njnewblk->jn_blkno) 5204 panic("jnewblk_merge: Merging disparate blocks."); 5205 /* 5206 * The record may be rolled back in the cg. 5207 */ 5208 if (jnewblk->jn_state & UNDONE) { 5209 jnewblk->jn_state &= ~UNDONE; 5210 njnewblk->jn_state |= UNDONE; 5211 njnewblk->jn_state &= ~ATTACHED; 5212 } 5213 /* 5214 * We modify the newer addref and free the older so that if neither 5215 * has been written the most up-to-date copy will be on disk. If 5216 * both have been written but rolled back we only temporarily need 5217 * one of them to fix the bits when the cg write completes. 5218 */ 5219 jnewblk->jn_state |= ATTACHED | COMPLETE; 5220 njnewblk->jn_oldfrags = jnewblk->jn_oldfrags; 5221 cancel_jnewblk(jnewblk, wkhd); 5222 WORKLIST_REMOVE(&jnewblk->jn_list); 5223 free_jnewblk(jnewblk); 5224 return (new); 5225 } 5226 5227 /* 5228 * Replace an old allocdirect dependency with a newer one. 5229 * This routine must be called with splbio interrupts blocked. 5230 */ 5231 static void 5232 allocdirect_merge(adphead, newadp, oldadp) 5233 struct allocdirectlst *adphead; /* head of list holding allocdirects */ 5234 struct allocdirect *newadp; /* allocdirect being added */ 5235 struct allocdirect *oldadp; /* existing allocdirect being checked */ 5236 { 5237 struct worklist *wk; 5238 struct freefrag *freefrag; 5239 5240 freefrag = NULL; 5241 mtx_assert(&lk, MA_OWNED); 5242 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 5243 newadp->ad_oldsize != oldadp->ad_newsize || 5244 newadp->ad_offset >= NDADDR) 5245 panic("%s %jd != new %jd || old size %ld != new %ld", 5246 "allocdirect_merge: old blkno", 5247 (intmax_t)newadp->ad_oldblkno, 5248 (intmax_t)oldadp->ad_newblkno, 5249 newadp->ad_oldsize, oldadp->ad_newsize); 5250 newadp->ad_oldblkno = oldadp->ad_oldblkno; 5251 newadp->ad_oldsize = oldadp->ad_oldsize; 5252 /* 5253 * If the old dependency had a fragment to free or had never 5254 * previously had a block allocated, then the new dependency 5255 * can immediately post its freefrag and adopt the old freefrag. 5256 * This action is done by swapping the freefrag dependencies. 5257 * The new dependency gains the old one's freefrag, and the 5258 * old one gets the new one and then immediately puts it on 5259 * the worklist when it is freed by free_newblk. It is 5260 * not possible to do this swap when the old dependency had a 5261 * non-zero size but no previous fragment to free. This condition 5262 * arises when the new block is an extension of the old block. 5263 * Here, the first part of the fragment allocated to the new 5264 * dependency is part of the block currently claimed on disk by 5265 * the old dependency, so cannot legitimately be freed until the 5266 * conditions for the new dependency are fulfilled. 5267 */ 5268 freefrag = newadp->ad_freefrag; 5269 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 5270 newadp->ad_freefrag = oldadp->ad_freefrag; 5271 oldadp->ad_freefrag = freefrag; 5272 } 5273 /* 5274 * If we are tracking a new directory-block allocation, 5275 * move it from the old allocdirect to the new allocdirect. 
5276 */ 5277 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { 5278 WORKLIST_REMOVE(wk); 5279 if (!LIST_EMPTY(&oldadp->ad_newdirblk)) 5280 panic("allocdirect_merge: extra newdirblk"); 5281 WORKLIST_INSERT(&newadp->ad_newdirblk, wk); 5282 } 5283 TAILQ_REMOVE(adphead, oldadp, ad_next); 5284 /* 5285 * We need to move any journal dependencies over to the freefrag 5286 * that releases this block if it exists. Otherwise we are 5287 * extending an existing block and we'll wait until that is 5288 * complete to release the journal space and extend the 5289 * new journal to cover this old space as well. 5290 */ 5291 if (freefrag == NULL) { 5292 if (oldadp->ad_newblkno != newadp->ad_newblkno) 5293 panic("allocdirect_merge: %jd != %jd", 5294 oldadp->ad_newblkno, newadp->ad_newblkno); 5295 newadp->ad_block.nb_jnewblk = (struct jnewblk *) 5296 jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list, 5297 &oldadp->ad_block.nb_jnewblk->jn_list, 5298 &newadp->ad_block.nb_jwork); 5299 oldadp->ad_block.nb_jnewblk = NULL; 5300 cancel_newblk(&oldadp->ad_block, NULL, 5301 &newadp->ad_block.nb_jwork); 5302 } else { 5303 wk = (struct worklist *) cancel_newblk(&oldadp->ad_block, 5304 &freefrag->ff_list, &freefrag->ff_jwork); 5305 freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk, 5306 &freefrag->ff_jwork); 5307 } 5308 free_newblk(&oldadp->ad_block); 5309 } 5310 5311 /* 5312 * Allocate a jfreefrag structure to journal a single block free. 5313 */ 5314 static struct jfreefrag * 5315 newjfreefrag(freefrag, ip, blkno, size, lbn) 5316 struct freefrag *freefrag; 5317 struct inode *ip; 5318 ufs2_daddr_t blkno; 5319 long size; 5320 ufs_lbn_t lbn; 5321 { 5322 struct jfreefrag *jfreefrag; 5323 struct fs *fs; 5324 5325 fs = ip->i_fs; 5326 jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG, 5327 M_SOFTDEP_FLAGS); 5328 workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump)); 5329 jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list); 5330 jfreefrag->fr_state = ATTACHED | DEPCOMPLETE; 5331 jfreefrag->fr_ino = ip->i_number; 5332 jfreefrag->fr_lbn = lbn; 5333 jfreefrag->fr_blkno = blkno; 5334 jfreefrag->fr_frags = numfrags(fs, size); 5335 jfreefrag->fr_freefrag = freefrag; 5336 5337 return (jfreefrag); 5338 } 5339 5340 /* 5341 * Allocate a new freefrag structure. 5342 */ 5343 static struct freefrag * 5344 newfreefrag(ip, blkno, size, lbn) 5345 struct inode *ip; 5346 ufs2_daddr_t blkno; 5347 long size; 5348 ufs_lbn_t lbn; 5349 { 5350 struct freefrag *freefrag; 5351 struct fs *fs; 5352 5353 CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd", 5354 ip->i_number, blkno, size, lbn); 5355 fs = ip->i_fs; 5356 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 5357 panic("newfreefrag: frag size"); 5358 freefrag = malloc(sizeof(struct freefrag), 5359 M_FREEFRAG, M_SOFTDEP_FLAGS); 5360 workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump)); 5361 freefrag->ff_state = ATTACHED; 5362 LIST_INIT(&freefrag->ff_jwork); 5363 freefrag->ff_inum = ip->i_number; 5364 freefrag->ff_vtype = ITOV(ip)->v_type; 5365 freefrag->ff_blkno = blkno; 5366 freefrag->ff_fragsize = size; 5367 5368 if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) { 5369 freefrag->ff_jdep = (struct worklist *) 5370 newjfreefrag(freefrag, ip, blkno, size, lbn); 5371 } else { 5372 freefrag->ff_state |= DEPCOMPLETE; 5373 freefrag->ff_jdep = NULL; 5374 } 5375 5376 return (freefrag); 5377 } 5378 5379 /* 5380 * This workitem de-allocates fragments that were replaced during 5381 * file block allocation. 
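 *
 * A sketch of the flow, assuming a fragment that was replaced while a
 * file grew: once the freefrag's dependencies are satisfied and it is
 * pulled off the work queue, this routine takes over the accumulated
 * ff_jwork items, cancels any still-unwritten jnewblk hanging off
 * ff_jdep, and hands the whole list to ffs_blkfree() so the journal
 * records are retired together with the bitmap update.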
5382 */ 5383 static void 5384 handle_workitem_freefrag(freefrag) 5385 struct freefrag *freefrag; 5386 { 5387 struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp); 5388 struct workhead wkhd; 5389 5390 CTR3(KTR_SUJ, 5391 "handle_workitem_freefrag: ino %d blkno %jd size %ld", 5392 freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize); 5393 /* 5394 * It would be illegal to add new completion items to the 5395 * freefrag after it was scheduled to be done, so it must be 5396 * safe to modify the list head here. 5397 */ 5398 LIST_INIT(&wkhd); 5399 ACQUIRE_LOCK(&lk); 5400 LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list); 5401 /* 5402 * If the journal has not been written we must cancel it here. 5403 */ 5404 if (freefrag->ff_jdep) { 5405 if (freefrag->ff_jdep->wk_type != D_JNEWBLK) 5406 panic("handle_workitem_freefrag: Unexpected type %d\n", 5407 freefrag->ff_jdep->wk_type); 5408 cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd); 5409 } 5410 FREE_LOCK(&lk); 5411 ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno, 5412 freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd); 5413 ACQUIRE_LOCK(&lk); 5414 WORKITEM_FREE(freefrag, D_FREEFRAG); 5415 FREE_LOCK(&lk); 5416 } 5417 5418 /* 5419 * Set up a dependency structure for an external attributes data block. 5420 * This routine follows much of the structure of softdep_setup_allocdirect. 5421 * See the description of softdep_setup_allocdirect above for details. 5422 */ 5423 void 5424 softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp) 5425 struct inode *ip; 5426 ufs_lbn_t off; 5427 ufs2_daddr_t newblkno; 5428 ufs2_daddr_t oldblkno; 5429 long newsize; 5430 long oldsize; 5431 struct buf *bp; 5432 { 5433 struct allocdirect *adp, *oldadp; 5434 struct allocdirectlst *adphead; 5435 struct freefrag *freefrag; 5436 struct inodedep *inodedep; 5437 struct jnewblk *jnewblk; 5438 struct newblk *newblk; 5439 struct mount *mp; 5440 ufs_lbn_t lbn; 5441 5442 if (off >= NXADDR) 5443 panic("softdep_setup_allocext: lbn %lld >= NXADDR", 5444 (long long)off); 5445 5446 lbn = bp->b_lblkno; 5447 mp = UFSTOVFS(ip->i_ump); 5448 if (oldblkno && oldblkno != newblkno) 5449 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); 5450 else 5451 freefrag = NULL; 5452 5453 ACQUIRE_LOCK(&lk); 5454 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) 5455 panic("softdep_setup_allocext: lost block"); 5456 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5457 ("softdep_setup_allocext: newblk already initialized")); 5458 /* 5459 * Convert the newblk to an allocdirect. 5460 */ 5461 newblk->nb_list.wk_type = D_ALLOCDIRECT; 5462 adp = (struct allocdirect *)newblk; 5463 newblk->nb_freefrag = freefrag; 5464 adp->ad_offset = off; 5465 adp->ad_oldblkno = oldblkno; 5466 adp->ad_newsize = newsize; 5467 adp->ad_oldsize = oldsize; 5468 adp->ad_state |= EXTDATA; 5469 5470 /* 5471 * Finish initializing the journal.
5472 */ 5473 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5474 jnewblk->jn_ino = ip->i_number; 5475 jnewblk->jn_lbn = lbn; 5476 add_to_journal(&jnewblk->jn_list); 5477 } 5478 if (freefrag && freefrag->ff_jdep != NULL && 5479 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5480 add_to_journal(freefrag->ff_jdep); 5481 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep); 5482 adp->ad_inodedep = inodedep; 5483 5484 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5485 /* 5486 * The list of allocdirects must be kept in sorted and ascending 5487 * order so that the rollback routines can quickly determine the 5488 * first uncommitted block (the size of the file stored on disk 5489 * ends at the end of the lowest committed fragment, or if there 5490 * are no fragments, at the end of the highest committed block). 5491 * Since files generally grow, the typical case is that the new 5492 * block is to be added at the end of the list. We speed this 5493 * special case by checking against the last allocdirect in the 5494 * list before laboriously traversing the list looking for the 5495 * insertion point. 5496 */ 5497 adphead = &inodedep->id_newextupdt; 5498 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5499 if (oldadp == NULL || oldadp->ad_offset <= off) { 5500 /* insert at end of list */ 5501 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5502 if (oldadp != NULL && oldadp->ad_offset == off) 5503 allocdirect_merge(adphead, adp, oldadp); 5504 FREE_LOCK(&lk); 5505 return; 5506 } 5507 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5508 if (oldadp->ad_offset >= off) 5509 break; 5510 } 5511 if (oldadp == NULL) 5512 panic("softdep_setup_allocext: lost entry"); 5513 /* insert in middle of list */ 5514 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5515 if (oldadp->ad_offset == off) 5516 allocdirect_merge(adphead, adp, oldadp); 5517 FREE_LOCK(&lk); 5518 } 5519 5520 /* 5521 * Indirect block allocation dependencies. 5522 * 5523 * The same dependencies that exist for a direct block also exist when 5524 * a new block is allocated and pointed to by an entry in a block of 5525 * indirect pointers. The undo/redo states described above are also 5526 * used here. Because an indirect block contains many pointers that 5527 * may have dependencies, a second copy of the entire in-memory indirect 5528 * block is kept. The buffer cache copy is always completely up-to-date. 5529 * The second copy, which is used only as a source for disk writes, 5530 * contains only the safe pointers (i.e., those that have no remaining 5531 * update dependencies). The second copy is freed when all pointers 5532 * are safe. The cache is not allowed to replace indirect blocks with 5533 * pending update dependencies. If a buffer containing an indirect 5534 * block with dependencies is written, these routines will mark it 5535 * dirty again. It can only be successfully written once all the 5536 * dependencies are removed. The ffs_fsync routine in conjunction with 5537 * softdep_sync_metadata work together to get all the dependencies 5538 * removed so that a file can be successfully written to disk. Three 5539 * procedures are used when setting up indirect block pointer 5540 * dependencies. The division is necessary because of the organization 5541 * of the "balloc" routine and because of the distinction between file 5542 * pages and file metadata blocks. 5543 */ 5544 5545 /* 5546 * Allocate a new allocindir structure. 
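 *
 * One difference from the direct-block path worth noting: when a
 * non-zero oldblkno is passed in, it always names a full block, since
 * indirect pointers never reference fragments, so the replacement
 * freefrag below is created with the full fs_bsize rather than a
 * fragment size.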
5547 */ 5548 static struct allocindir * 5549 newallocindir(ip, ptrno, newblkno, oldblkno, lbn) 5550 struct inode *ip; /* inode for file being extended */ 5551 int ptrno; /* offset of pointer in indirect block */ 5552 ufs2_daddr_t newblkno; /* disk block number being added */ 5553 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5554 ufs_lbn_t lbn; 5555 { 5556 struct newblk *newblk; 5557 struct allocindir *aip; 5558 struct freefrag *freefrag; 5559 struct jnewblk *jnewblk; 5560 5561 if (oldblkno) 5562 freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn); 5563 else 5564 freefrag = NULL; 5565 ACQUIRE_LOCK(&lk); 5566 if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0) 5567 panic("newallocindir: lost block"); 5568 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5569 ("newallocindir: newblk already initialized")); 5570 newblk->nb_list.wk_type = D_ALLOCINDIR; 5571 newblk->nb_freefrag = freefrag; 5572 aip = (struct allocindir *)newblk; 5573 aip->ai_offset = ptrno; 5574 aip->ai_oldblkno = oldblkno; 5575 aip->ai_lbn = lbn; 5576 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5577 jnewblk->jn_ino = ip->i_number; 5578 jnewblk->jn_lbn = lbn; 5579 add_to_journal(&jnewblk->jn_list); 5580 } 5581 if (freefrag && freefrag->ff_jdep != NULL && 5582 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5583 add_to_journal(freefrag->ff_jdep); 5584 return (aip); 5585 } 5586 5587 /* 5588 * Called just before setting an indirect block pointer 5589 * to a newly allocated file page. 5590 */ 5591 void 5592 softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) 5593 struct inode *ip; /* inode for file being extended */ 5594 ufs_lbn_t lbn; /* allocated block number within file */ 5595 struct buf *bp; /* buffer with indirect blk referencing page */ 5596 int ptrno; /* offset of pointer in indirect block */ 5597 ufs2_daddr_t newblkno; /* disk block number being added */ 5598 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5599 struct buf *nbp; /* buffer holding allocated page */ 5600 { 5601 struct inodedep *inodedep; 5602 struct freefrag *freefrag; 5603 struct allocindir *aip; 5604 struct pagedep *pagedep; 5605 struct mount *mp; 5606 int dflags; 5607 5608 if (lbn != nbp->b_lblkno) 5609 panic("softdep_setup_allocindir_page: lbn %jd != lblkno %jd", 5610 lbn, nbp->b_lblkno); 5611 CTR4(KTR_SUJ, 5612 "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd " 5613 "lbn %jd", ip->i_number, newblkno, oldblkno, lbn); 5614 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page"); 5615 mp = UFSTOVFS(ip->i_ump); 5616 aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn); 5617 dflags = DEPALLOC; 5618 if (IS_SNAPSHOT(ip)) 5619 dflags |= NODELAY; 5620 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 5621 /* 5622 * If we are allocating a directory page, then we must 5623 * allocate an associated pagedep to track additions and 5624 * deletions. 5625 */ 5626 if ((ip->i_mode & IFMT) == IFDIR) 5627 pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep); 5628 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5629 freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn); 5630 FREE_LOCK(&lk); 5631 if (freefrag) 5632 handle_workitem_freefrag(freefrag); 5633 } 5634 5635 /* 5636 * Called just before setting an indirect block pointer to a 5637 * newly allocated indirect block.
5638 */ 5639 void 5640 softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 5641 struct buf *nbp; /* newly allocated indirect block */ 5642 struct inode *ip; /* inode for file being extended */ 5643 struct buf *bp; /* indirect block referencing allocated block */ 5644 int ptrno; /* offset of pointer in indirect block */ 5645 ufs2_daddr_t newblkno; /* disk block number being added */ 5646 { 5647 struct inodedep *inodedep; 5648 struct allocindir *aip; 5649 ufs_lbn_t lbn; 5650 int dflags; 5651 5652 CTR3(KTR_SUJ, 5653 "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d", 5654 ip->i_number, newblkno, ptrno); 5655 lbn = nbp->b_lblkno; 5656 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta"); 5657 aip = newallocindir(ip, ptrno, newblkno, 0, lbn); 5658 dflags = DEPALLOC; 5659 if (IS_SNAPSHOT(ip)) 5660 dflags |= NODELAY; 5661 inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep); 5662 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5663 if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)) 5664 panic("softdep_setup_allocindir_meta: Block already existed"); 5665 FREE_LOCK(&lk); 5666 } 5667 5668 static void 5669 indirdep_complete(indirdep) 5670 struct indirdep *indirdep; 5671 { 5672 struct allocindir *aip; 5673 5674 LIST_REMOVE(indirdep, ir_next); 5675 indirdep->ir_state |= DEPCOMPLETE; 5676 5677 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) { 5678 LIST_REMOVE(aip, ai_next); 5679 free_newblk(&aip->ai_block); 5680 } 5681 /* 5682 * If this indirdep is not attached to a buf it was simply waiting 5683 * on completion to clear completehd. free_indirdep() asserts 5684 * that nothing is dangling. 5685 */ 5686 if ((indirdep->ir_state & ONWORKLIST) == 0) 5687 free_indirdep(indirdep); 5688 } 5689 5690 static struct indirdep * 5691 indirdep_lookup(mp, ip, bp) 5692 struct mount *mp; 5693 struct inode *ip; 5694 struct buf *bp; 5695 { 5696 struct indirdep *indirdep, *newindirdep; 5697 struct newblk *newblk; 5698 struct worklist *wk; 5699 struct fs *fs; 5700 ufs2_daddr_t blkno; 5701 5702 mtx_assert(&lk, MA_OWNED); 5703 indirdep = NULL; 5704 newindirdep = NULL; 5705 fs = ip->i_fs; 5706 for (;;) { 5707 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5708 if (wk->wk_type != D_INDIRDEP) 5709 continue; 5710 indirdep = WK_INDIRDEP(wk); 5711 break; 5712 } 5713 /* Found on the buffer worklist, no new structure to free. */ 5714 if (indirdep != NULL && newindirdep == NULL) 5715 return (indirdep); 5716 if (indirdep != NULL && newindirdep != NULL) 5717 panic("indirdep_lookup: simultaneous create"); 5718 /* None found on the buffer and a new structure is ready. */ 5719 if (indirdep == NULL && newindirdep != NULL) 5720 break; 5721 /* None found and no new structure available. 
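 * We drop the softdep lock, allocate a candidate indirdep, and loop to
 * re-scan b_dep, since another thread may have attached an indirdep
 * while the lock was released. (Illustrative note, not part of the
 * original source.)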
*/ 5722 FREE_LOCK(&lk); 5723 newindirdep = malloc(sizeof(struct indirdep), 5724 M_INDIRDEP, M_SOFTDEP_FLAGS); 5725 workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp); 5726 newindirdep->ir_state = ATTACHED; 5727 if (ip->i_ump->um_fstype == UFS1) 5728 newindirdep->ir_state |= UFS1FMT; 5729 TAILQ_INIT(&newindirdep->ir_trunc); 5730 newindirdep->ir_saveddata = NULL; 5731 LIST_INIT(&newindirdep->ir_deplisthd); 5732 LIST_INIT(&newindirdep->ir_donehd); 5733 LIST_INIT(&newindirdep->ir_writehd); 5734 LIST_INIT(&newindirdep->ir_completehd); 5735 if (bp->b_blkno == bp->b_lblkno) { 5736 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 5737 NULL, NULL); 5738 bp->b_blkno = blkno; 5739 } 5740 newindirdep->ir_freeblks = NULL; 5741 newindirdep->ir_savebp = 5742 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0); 5743 newindirdep->ir_bp = bp; 5744 BUF_KERNPROC(newindirdep->ir_savebp); 5745 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 5746 ACQUIRE_LOCK(&lk); 5747 } 5748 indirdep = newindirdep; 5749 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 5750 /* 5751 * If the block is not yet allocated we don't set DEPCOMPLETE so 5752 * that we don't free dependencies until the pointers are valid. 5753 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather 5754 * than using the hash. 5755 */ 5756 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)) 5757 LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next); 5758 else 5759 indirdep->ir_state |= DEPCOMPLETE; 5760 return (indirdep); 5761 } 5762 5763 /* 5764 * Called to finish the allocation of the "aip" allocated 5765 * by one of the two routines above. 5766 */ 5767 static struct freefrag * 5768 setup_allocindir_phase2(bp, ip, inodedep, aip, lbn) 5769 struct buf *bp; /* in-memory copy of the indirect block */ 5770 struct inode *ip; /* inode for file being extended */ 5771 struct inodedep *inodedep; /* Inodedep for ip */ 5772 struct allocindir *aip; /* allocindir allocated by the above routines */ 5773 ufs_lbn_t lbn; /* Logical block number for this block. */ 5774 { 5775 struct fs *fs; 5776 struct indirdep *indirdep; 5777 struct allocindir *oldaip; 5778 struct freefrag *freefrag; 5779 struct mount *mp; 5780 5781 mtx_assert(&lk, MA_OWNED); 5782 mp = UFSTOVFS(ip->i_ump); 5783 fs = ip->i_fs; 5784 if (bp->b_lblkno >= 0) 5785 panic("setup_allocindir_phase2: not indir blk"); 5786 KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs), 5787 ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset)); 5788 indirdep = indirdep_lookup(mp, ip, bp); 5789 KASSERT(indirdep->ir_savebp != NULL, 5790 ("setup_allocindir_phase2 NULL ir_savebp")); 5791 aip->ai_indirdep = indirdep; 5792 /* 5793 * Check for an unwritten dependency for this indirect offset. If 5794 * there is, merge the old dependency into the new one. This happens 5795 * as a result of reallocblk only. 5796 */ 5797 freefrag = NULL; 5798 if (aip->ai_oldblkno != 0) { 5799 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) { 5800 if (oldaip->ai_offset == aip->ai_offset) { 5801 freefrag = allocindir_merge(aip, oldaip); 5802 goto done; 5803 } 5804 } 5805 LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) { 5806 if (oldaip->ai_offset == aip->ai_offset) { 5807 freefrag = allocindir_merge(aip, oldaip); 5808 goto done; 5809 } 5810 } 5811 } 5812 done: 5813 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 5814 return (freefrag); 5815 } 5816 5817 /* 5818 * Merge two allocindirs which refer to the same block. 
Move newblock 5819 * dependencies and set up the freefrags appropriately. 5820 */ 5821 static struct freefrag * 5822 allocindir_merge(aip, oldaip) 5823 struct allocindir *aip; 5824 struct allocindir *oldaip; 5825 { 5826 struct freefrag *freefrag; 5827 struct worklist *wk; 5828 5829 if (oldaip->ai_newblkno != aip->ai_oldblkno) 5830 panic("allocindir_merge: blkno"); 5831 aip->ai_oldblkno = oldaip->ai_oldblkno; 5832 freefrag = aip->ai_freefrag; 5833 aip->ai_freefrag = oldaip->ai_freefrag; 5834 oldaip->ai_freefrag = NULL; 5835 KASSERT(freefrag != NULL, ("allocindir_merge: No freefrag")); 5836 /* 5837 * If we are tracking a new directory-block allocation, 5838 * move it from the old allocindir to the new allocindir. 5839 */ 5840 if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) { 5841 WORKLIST_REMOVE(wk); 5842 if (!LIST_EMPTY(&oldaip->ai_newdirblk)) 5843 panic("allocindir_merge: extra newdirblk"); 5844 WORKLIST_INSERT(&aip->ai_newdirblk, wk); 5845 } 5846 /* 5847 * We can skip journaling for this freefrag and just complete 5848 * any pending journal work for the allocindir that is being 5849 * removed after the freefrag completes. 5850 */ 5851 if (freefrag->ff_jdep) 5852 cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep)); 5853 LIST_REMOVE(oldaip, ai_next); 5854 freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block, 5855 &freefrag->ff_list, &freefrag->ff_jwork); 5856 free_newblk(&oldaip->ai_block); 5857 5858 return (freefrag); 5859 } 5860 5861 static inline void 5862 setup_freedirect(freeblks, ip, i, needj) 5863 struct freeblks *freeblks; 5864 struct inode *ip; 5865 int i; 5866 int needj; 5867 { 5868 ufs2_daddr_t blkno; 5869 int frags; 5870 5871 blkno = DIP(ip, i_db[i]); 5872 if (blkno == 0) 5873 return; 5874 DIP_SET(ip, i_db[i], 0); 5875 frags = sblksize(ip->i_fs, ip->i_size, i); 5876 frags = numfrags(ip->i_fs, frags); 5877 newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj); 5878 } 5879 5880 static inline void 5881 setup_freeext(freeblks, ip, i, needj) 5882 struct freeblks *freeblks; 5883 struct inode *ip; 5884 int i; 5885 int needj; 5886 { 5887 ufs2_daddr_t blkno; 5888 int frags; 5889 5890 blkno = ip->i_din2->di_extb[i]; 5891 if (blkno == 0) 5892 return; 5893 ip->i_din2->di_extb[i] = 0; 5894 frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i); 5895 frags = numfrags(ip->i_fs, frags); 5896 newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj); 5897 } 5898 5899 static inline void 5900 setup_freeindir(freeblks, ip, i, lbn, needj) 5901 struct freeblks *freeblks; 5902 struct inode *ip; 5903 int i; 5904 ufs_lbn_t lbn; 5905 int needj; 5906 { 5907 ufs2_daddr_t blkno; 5908 5909 blkno = DIP(ip, i_ib[i]); 5910 if (blkno == 0) 5911 return; 5912 DIP_SET(ip, i_ib[i], 0); 5913 newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag, 5914 0, needj); 5915 } 5916 5917 static inline struct freeblks * 5918 newfreeblks(mp, ip) 5919 struct mount *mp; 5920 struct inode *ip; 5921 { 5922 struct freeblks *freeblks; 5923 5924 freeblks = malloc(sizeof(struct freeblks), 5925 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); 5926 workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp); 5927 LIST_INIT(&freeblks->fb_jblkdephd); 5928 LIST_INIT(&freeblks->fb_jwork); 5929 freeblks->fb_ref = 0; 5930 freeblks->fb_cgwait = 0; 5931 freeblks->fb_state = ATTACHED; 5932 freeblks->fb_uid = ip->i_uid; 5933 freeblks->fb_inum = ip->i_number; 5934 freeblks->fb_vtype = ITOV(ip)->v_type; 5935 freeblks->fb_modrev = DIP(ip, i_modrev); 5936 freeblks->fb_devvp = ip->i_devvp;
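	/*
	 * (Illustrative note, not part of the original source: fb_chkcnt
	 * is later overwritten with -datablocks by the truncation
	 * routines; the freeblks carries it so the eventual block
	 * de-allocation can verify the count, as described in the block
	 * de-allocation comment further below.)
	 */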
5937 freeblks->fb_chkcnt = 0; 5938 freeblks->fb_len = 0; 5939 5940 return (freeblks); 5941 } 5942 5943 static void 5944 trunc_indirdep(indirdep, freeblks, bp, off) 5945 struct indirdep *indirdep; 5946 struct freeblks *freeblks; 5947 struct buf *bp; 5948 int off; 5949 { 5950 struct allocindir *aip, *aipn; 5951 5952 /* 5953 * The first set of allocindirs won't be in savedbp. 5954 */ 5955 LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn) 5956 if (aip->ai_offset > off) 5957 cancel_allocindir(aip, bp, freeblks, 1); 5958 LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn) 5959 if (aip->ai_offset > off) 5960 cancel_allocindir(aip, bp, freeblks, 1); 5961 /* 5962 * These will exist in savedbp. 5963 */ 5964 LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn) 5965 if (aip->ai_offset > off) 5966 cancel_allocindir(aip, NULL, freeblks, 0); 5967 LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn) 5968 if (aip->ai_offset > off) 5969 cancel_allocindir(aip, NULL, freeblks, 0); 5970 } 5971 5972 /* 5973 * Follow the chain of indirects down to lastlbn creating a freework 5974 * structure for each. This will be used to start indir_trunc() at 5975 * the right offset and create the journal records for the partial 5976 * truncation. A second step will handle the truncated dependencies. 5977 */ 5978 static int 5979 setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno) 5980 struct freeblks *freeblks; 5981 struct inode *ip; 5982 ufs_lbn_t lbn; 5983 ufs_lbn_t lastlbn; 5984 ufs2_daddr_t blkno; 5985 { 5986 struct indirdep *indirdep; 5987 struct indirdep *indirn; 5988 struct freework *freework; 5989 struct newblk *newblk; 5990 struct mount *mp; 5991 struct buf *bp; 5992 uint8_t *start; 5993 uint8_t *end; 5994 ufs_lbn_t lbnadd; 5995 int level; 5996 int error; 5997 int off; 5998 5999 6000 freework = NULL; 6001 if (blkno == 0) 6002 return (0); 6003 mp = freeblks->fb_list.wk_mp; 6004 bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0); 6005 if ((bp->b_flags & B_CACHE) == 0) { 6006 bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno); 6007 bp->b_iocmd = BIO_READ; 6008 bp->b_flags &= ~B_INVAL; 6009 bp->b_ioflags &= ~BIO_ERROR; 6010 vfs_busy_pages(bp, 0); 6011 bp->b_iooffset = dbtob(bp->b_blkno); 6012 bstrategy(bp); 6013 curthread->td_ru.ru_inblock++; 6014 error = bufwait(bp); 6015 if (error) { 6016 brelse(bp); 6017 return (error); 6018 } 6019 } 6020 level = lbn_level(lbn); 6021 lbnadd = lbn_offset(ip->i_fs, level); 6022 /* 6023 * Compute the offset of the last block we want to keep. Store 6024 * in the freework the first block we want to completely free. 6025 */ 6026 off = (lastlbn - -(lbn + level)) / lbnadd; 6027 if (off + 1 == NINDIR(ip->i_fs)) 6028 goto nowork; 6029 freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1, 6030 0); 6031 /* 6032 * Link the freework into the indirdep. This will prevent any new 6033 * allocations from proceeding until we are finished with the 6034 * truncate and the block is written. 6035 */ 6036 ACQUIRE_LOCK(&lk); 6037 indirdep = indirdep_lookup(mp, ip, bp); 6038 if (indirdep->ir_freeblks) 6039 panic("setup_trunc_indir: indirdep already truncated."); 6040 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next); 6041 freework->fw_indir = indirdep; 6042 /* 6043 * Cancel any allocindirs that will not make it to disk. 6044 * We have to do this for all copies of the indirdep that 6045 * live on this newblk.
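 * (Illustrative note, not part of the original source: while the
 * newblk is not DEPCOMPLETE several indirdep copies can hang off its
 * nb_indirdeps list, so each copy must have its allocindirs beyond the
 * truncation point canceled, which is what the loop below does.)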
6046 */ 6047 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 6048 newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk); 6049 LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next) 6050 trunc_indirdep(indirn, freeblks, bp, off); 6051 } else 6052 trunc_indirdep(indirdep, freeblks, bp, off); 6053 FREE_LOCK(&lk); 6054 /* 6055 * Creation is protected by the buf lock. The saveddata is only 6056 * needed if a full truncation follows a partial truncation but it 6057 * is difficult to allocate in that case so we fetch it anyway. 6058 */ 6059 if (indirdep->ir_saveddata == NULL) 6060 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, 6061 M_SOFTDEP_FLAGS); 6062 nowork: 6063 /* Fetch the blkno of the child and the zero start offset. */ 6064 if (ip->i_ump->um_fstype == UFS1) { 6065 blkno = ((ufs1_daddr_t *)bp->b_data)[off]; 6066 start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1]; 6067 } else { 6068 blkno = ((ufs2_daddr_t *)bp->b_data)[off]; 6069 start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1]; 6070 } 6071 if (freework) { 6072 /* Zero the truncated pointers. */ 6073 end = bp->b_data + bp->b_bcount; 6074 bzero(start, end - start); 6075 bdwrite(bp); 6076 } else 6077 bqrelse(bp); 6078 if (level == 0) 6079 return (0); 6080 lbn++; /* adjust level */ 6081 lbn -= (off * lbnadd); 6082 return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno); 6083 } 6084 6085 /* 6086 * Complete the partial truncation of an indirect block set up by 6087 * setup_trunc_indir(). This zeros the truncated pointers in the saved 6088 * copy and writes them to disk before the freeblks is allowed to complete. 6089 */ 6090 static void 6091 complete_trunc_indir(freework) 6092 struct freework *freework; 6093 { 6094 struct freework *fwn; 6095 struct indirdep *indirdep; 6096 struct buf *bp; 6097 uintptr_t start; 6098 int count; 6099 6100 indirdep = freework->fw_indir; 6101 for (;;) { 6102 bp = indirdep->ir_bp; 6103 /* See if the block was discarded. */ 6104 if (bp == NULL) 6105 break; 6106 /* Inline part of getdirtybuf(). We don't want bremfree. */ 6107 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) 6108 break; 6109 if (BUF_LOCK(bp, 6110 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, &lk) == 0) 6111 BUF_UNLOCK(bp); 6112 ACQUIRE_LOCK(&lk); 6113 } 6114 mtx_assert(&lk, MA_OWNED); 6115 freework->fw_state |= DEPCOMPLETE; 6116 TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next); 6117 /* 6118 * Zero the pointers in the saved copy. 6119 */ 6120 if (indirdep->ir_state & UFS1FMT) 6121 start = sizeof(ufs1_daddr_t); 6122 else 6123 start = sizeof(ufs2_daddr_t); 6124 start *= freework->fw_start; 6125 count = indirdep->ir_savebp->b_bcount - start; 6126 start += (uintptr_t)indirdep->ir_savebp->b_data; 6127 bzero((char *)start, count); 6128 /* 6129 * We need to start the next truncation in the list if it has not 6130 * been started yet. 6131 */ 6132 fwn = TAILQ_FIRST(&indirdep->ir_trunc); 6133 if (fwn != NULL) { 6134 if (fwn->fw_freeblks == indirdep->ir_freeblks) 6135 TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next); 6136 if ((fwn->fw_state & ONWORKLIST) == 0) 6137 freework_enqueue(fwn); 6138 } 6139 /* 6140 * If bp is NULL the block was fully truncated, restore 6141 * the saved block list; otherwise free it if it is no 6142 * longer needed.
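 * (Illustrative note, not part of the original source: ir_saveddata
 * holds the pointer contents from before the partial truncation;
 * copying it back into ir_savebp when the buffer was discarded keeps
 * the "safe copy" that is written to disk consistent.)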
6143 */ 6144 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 6145 if (bp == NULL) 6146 bcopy(indirdep->ir_saveddata, 6147 indirdep->ir_savebp->b_data, 6148 indirdep->ir_savebp->b_bcount); 6149 free(indirdep->ir_saveddata, M_INDIRDEP); 6150 indirdep->ir_saveddata = NULL; 6151 } 6152 /* 6153 * When bp is NULL there is a full truncation pending. We 6154 * must wait for this full truncation to be journaled before 6155 * we can release this freework because the disk pointers will 6156 * never be written as zero. 6157 */ 6158 if (bp == NULL) { 6159 if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd)) 6160 handle_written_freework(freework); 6161 else 6162 WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd, 6163 &freework->fw_list); 6164 } else { 6165 /* Complete when the real copy is written. */ 6166 WORKLIST_INSERT(&bp->b_dep, &freework->fw_list); 6167 BUF_UNLOCK(bp); 6168 } 6169 } 6170 6171 /* 6172 * Calculate the number of blocks we are going to release where datablocks 6173 * is the current total and length is the new file size. 6174 */ 6175 ufs2_daddr_t 6176 blkcount(fs, datablocks, length) 6177 struct fs *fs; 6178 ufs2_daddr_t datablocks; 6179 off_t length; 6180 { 6181 off_t totblks, numblks; 6182 6183 totblks = 0; 6184 numblks = howmany(length, fs->fs_bsize); 6185 if (numblks <= NDADDR) { 6186 totblks = howmany(length, fs->fs_fsize); 6187 goto out; 6188 } 6189 totblks = blkstofrags(fs, numblks); 6190 numblks -= NDADDR; 6191 /* 6192 * Count all single, then double, then triple indirects required. 6193 * Subtracting one indirect's worth of blocks for each pass 6194 * acknowledges one of each pointed to by the inode. 6195 */ 6196 for (;;) { 6197 totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs))); 6198 numblks -= NINDIR(fs); 6199 if (numblks <= 0) 6200 break; 6201 numblks = howmany(numblks, NINDIR(fs)); 6202 } 6203 out: 6204 totblks = fsbtodb(fs, totblks); 6205 /* 6206 * Handle sparse files. We can't reclaim more blocks than the inode 6207 * references. We will correct it later in handle_complete_freeblks() 6208 * when we know the real count. 6209 */ 6210 if (totblks > datablocks) 6211 return (0); 6212 return (datablocks - totblks); 6213 } 6214 6215 /* 6216 * Handle freeblocks for journaled softupdate filesystems. 6217 * 6218 * Contrary to normal softupdates, we must preserve the block pointers in 6219 * indirects until their subordinates are free. This is to avoid journaling 6220 * every block that is freed which may consume more space than the journal 6221 * itself. The recovery program will see the free block journals at the 6222 * base of the truncated area and traverse them to reclaim space. The 6223 * pointers in the inode may be cleared immediately after the journal 6224 * records are written because each direct and indirect pointer in the 6225 * inode is recorded in a journal. This permits full truncation to proceed 6226 * asynchronously. The write order is journal -> inode -> cgs -> indirects. 6227 * 6228 * The algorithm is as follows: 6229 * 1) Traverse the in-memory state and create journal entries to release 6230 * the relevant blocks and full indirect trees. 6231 * 2) Traverse the indirect block chain adding partial truncation freework 6232 * records to indirects in the path to lastlbn. The freework will 6233 * prevent new allocation dependencies from being satisfied in this 6234 * indirect until the truncation completes. 6235 * 3) Read and lock the inode block, performing an update with the new size 6236 * and pointers.
This prevents truncated data from becoming valid on 6237 * disk through step 4. 6238 * 4) Reap unsatisfied dependencies that are beyond the truncated area, 6239 * eliminate journal work for those records that do not require it. 6240 * 5) Schedule the journal records to be written followed by the inode block. 6241 * 6) Allocate any necessary frags for the end of file. 6242 * 7) Zero any partially truncated blocks. 6243 * 6244 * From this point truncation proceeds asynchronously using the freework and 6245 * indir_trunc machinery. The file will not be extended again into a 6246 * partially truncated indirect block until all work is completed but 6247 * the normal dependency mechanism ensures that it is rolled back/forward 6248 * as appropriate. Further truncation may occur without delay and is 6249 * serialized in indir_trunc(). 6250 */ 6251 void 6252 softdep_journal_freeblocks(ip, cred, length, flags) 6253 struct inode *ip; /* The inode whose length is to be reduced */ 6254 struct ucred *cred; 6255 off_t length; /* The new length for the file */ 6256 int flags; /* IO_EXT and/or IO_NORMAL */ 6257 { 6258 struct freeblks *freeblks, *fbn; 6259 struct worklist *wk, *wkn; 6260 struct inodedep *inodedep; 6261 struct jblkdep *jblkdep; 6262 struct allocdirect *adp, *adpn; 6263 struct fs *fs; 6264 struct buf *bp; 6265 struct vnode *vp; 6266 struct mount *mp; 6267 ufs2_daddr_t extblocks, datablocks; 6268 ufs_lbn_t tmpval, lbn, lastlbn; 6269 int frags, lastoff, iboff, allocblock, needj, dflags, error, i; 6270 6271 fs = ip->i_fs; 6272 mp = UFSTOVFS(ip->i_ump); 6273 vp = ITOV(ip); 6274 needj = 1; 6275 iboff = -1; 6276 allocblock = 0; 6277 extblocks = 0; 6278 datablocks = 0; 6279 frags = 0; 6280 freeblks = newfreeblks(mp, ip); 6281 ACQUIRE_LOCK(&lk); 6282 /* 6283 * If we're truncating a removed file that will never be written 6284 * we don't need to journal the block frees. The canceled journals 6285 * for the allocations will suffice. 6286 */ 6287 dflags = DEPALLOC; 6288 if (IS_SNAPSHOT(ip)) 6289 dflags |= NODELAY; 6290 inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6291 if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED && 6292 length == 0) 6293 needj = 0; 6294 CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d", 6295 ip->i_number, length, needj); 6296 FREE_LOCK(&lk); 6297 /* 6298 * Calculate the lbn that we are truncating to. This results in -1 6299 * if we're truncating to 0 bytes. So it is the last lbn we want 6300 * to keep, not the first lbn we want to truncate. 6301 */ 6302 lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1; 6303 lastoff = blkoff(fs, length); 6304 /* 6305 * Compute frags we are keeping in lastlbn. 0 means all. 6306 */ 6307 if (lastlbn >= 0 && lastlbn < NDADDR) { 6308 frags = fragroundup(fs, lastoff); 6309 /* adp offset of last valid allocdirect. */ 6310 iboff = lastlbn; 6311 } else if (lastlbn > 0) 6312 iboff = NDADDR; 6313 if (fs->fs_magic == FS_UFS2_MAGIC) 6314 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 6315 /* 6316 * Handle normal data blocks and indirects. This section saves 6317 * values used after the inode update to complete frag and indirect 6318 * truncation. 6319 */ 6320 if ((flags & IO_NORMAL) != 0) { 6321 /* 6322 * Handle truncation of whole direct and indirect blocks. 6323 */ 6324 for (i = iboff + 1; i < NDADDR; i++) 6325 setup_freedirect(freeblks, ip, i, needj); 6326 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR; 6327 i++, lbn += tmpval, tmpval *= NINDIR(fs)) { 6328 /* Release a whole indirect tree.
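 * (Illustrative example, not part of the original source: for UFS2
 * with 16K blocks, NINDIR(fs) == 2048 and NDADDR == 12, so the loop
 * visits lbn 12 for the single indirect (covering 2048 blocks), then
 * lbn 2060 for the double (2048 * 2048 blocks), then the triple; any
 * level whose first lbn lies past lastlbn is freed whole here.)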
*/ 6329 if (lbn > lastlbn) { 6330 setup_freeindir(freeblks, ip, i, -lbn -i, 6331 needj); 6332 continue; 6333 } 6334 iboff = i + NDADDR; 6335 /* 6336 * Traverse partially truncated indirect tree. 6337 */ 6338 if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn) 6339 setup_trunc_indir(freeblks, ip, -lbn - i, 6340 lastlbn, DIP(ip, i_ib[i])); 6341 } 6342 /* 6343 * Handle partial truncation to a frag boundary. 6344 */ 6345 if (frags) { 6346 ufs2_daddr_t blkno; 6347 long oldfrags; 6348 6349 oldfrags = blksize(fs, ip, lastlbn); 6350 blkno = DIP(ip, i_db[lastlbn]); 6351 if (blkno && oldfrags != frags) { 6352 oldfrags -= frags; 6353 oldfrags = numfrags(ip->i_fs, oldfrags); 6354 blkno += numfrags(ip->i_fs, frags); 6355 newfreework(ip->i_ump, freeblks, NULL, lastlbn, 6356 blkno, oldfrags, 0, needj); 6357 } else if (blkno == 0) 6358 allocblock = 1; 6359 } 6360 /* 6361 * Add a journal record for partial truncate if we are 6362 * handling indirect blocks. Non-indirects need no extra 6363 * journaling. 6364 */ 6365 if (length != 0 && lastlbn >= NDADDR) { 6366 ip->i_flag |= IN_TRUNCATED; 6367 newjtrunc(freeblks, length, 0); 6368 } 6369 ip->i_size = length; 6370 DIP_SET(ip, i_size, ip->i_size); 6371 datablocks = DIP(ip, i_blocks) - extblocks; 6372 if (length != 0) 6373 datablocks = blkcount(ip->i_fs, datablocks, length); 6374 freeblks->fb_len = length; 6375 } 6376 if ((flags & IO_EXT) != 0) { 6377 for (i = 0; i < NXADDR; i++) 6378 setup_freeext(freeblks, ip, i, needj); 6379 ip->i_din2->di_extsize = 0; 6380 datablocks += extblocks; 6381 } 6382 #ifdef QUOTA 6383 /* Reference the quotas in case the block count is wrong in the end. */ 6384 quotaref(vp, freeblks->fb_quota); 6385 (void) chkdq(ip, -datablocks, NOCRED, 0); 6386 #endif 6387 freeblks->fb_chkcnt = -datablocks; 6388 UFS_LOCK(ip->i_ump); 6389 fs->fs_pendingblocks += datablocks; 6390 UFS_UNLOCK(ip->i_ump); 6391 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6392 /* 6393 * Handle truncation of incomplete alloc direct dependencies. We 6394 * hold the inode block locked to prevent incomplete dependencies 6395 * from reaching the disk while we are eliminating those that 6396 * have been truncated. This is a partially inlined ffs_update(). 6397 */ 6398 ufs_itimes(vp); 6399 ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); 6400 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6401 (int)fs->fs_bsize, cred, &bp); 6402 if (error) { 6403 brelse(bp); 6404 softdep_error("softdep_journal_freeblocks", error); 6405 return; 6406 } 6407 if (bp->b_bufsize == fs->fs_bsize) 6408 bp->b_flags |= B_CLUSTEROK; 6409 softdep_update_inodeblock(ip, bp, 0); 6410 if (ip->i_ump->um_fstype == UFS1) 6411 *((struct ufs1_dinode *)bp->b_data + 6412 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; 6413 else 6414 *((struct ufs2_dinode *)bp->b_data + 6415 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; 6416 ACQUIRE_LOCK(&lk); 6417 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6418 if ((inodedep->id_state & IOSTARTED) != 0) 6419 panic("softdep_journal_freeblocks: inode busy"); 6420 /* 6421 * Add the freeblks structure to the list of operations that 6422 * must await the zero'ed inode being written to disk. If we 6423 * still have a bitmap dependency (needj), then the inode 6424 * has never been written to disk, so we can process the 6425 * freeblks below once we have deleted the dependencies.
6426 */ 6427 if (needj) 6428 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 6429 else 6430 freeblks->fb_state |= COMPLETE; 6431 if ((flags & IO_NORMAL) != 0) { 6432 TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) { 6433 if (adp->ad_offset > iboff) 6434 cancel_allocdirect(&inodedep->id_inoupdt, adp, 6435 freeblks); 6436 /* 6437 * Truncate the allocdirect. We could eliminate 6438 * or modify journal records as well. 6439 */ 6440 else if (adp->ad_offset == iboff && frags) 6441 adp->ad_newsize = frags; 6442 } 6443 } 6444 if ((flags & IO_EXT) != 0) 6445 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 6446 cancel_allocdirect(&inodedep->id_extupdt, adp, 6447 freeblks); 6448 /* 6449 * Scan the bufwait list for newblock dependencies that will never 6450 * make it to disk. 6451 */ 6452 LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) { 6453 if (wk->wk_type != D_ALLOCDIRECT) 6454 continue; 6455 adp = WK_ALLOCDIRECT(wk); 6456 if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) || 6457 ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) { 6458 cancel_jfreeblk(freeblks, adp->ad_newblkno); 6459 cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork); 6460 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 6461 } 6462 } 6463 /* 6464 * Add journal work. 6465 */ 6466 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) 6467 add_to_journal(&jblkdep->jb_list); 6468 FREE_LOCK(&lk); 6469 bdwrite(bp); 6470 /* 6471 * Truncate dependency structures beyond length. 6472 */ 6473 trunc_dependencies(ip, freeblks, lastlbn, frags, flags); 6474 /* 6475 * This is only set when we need to allocate a fragment because 6476 * none existed at the end of a frag-sized file. It handles only 6477 * allocating a new, zero filled block. 6478 */ 6479 if (allocblock) { 6480 ip->i_size = length - lastoff; 6481 DIP_SET(ip, i_size, ip->i_size); 6482 error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp); 6483 if (error != 0) { 6484 softdep_error("softdep_journal_freeblks", error); 6485 return; 6486 } 6487 ip->i_size = length; 6488 DIP_SET(ip, i_size, length); 6489 ip->i_flag |= IN_CHANGE | IN_UPDATE; 6490 allocbuf(bp, frags); 6491 ffs_update(vp, 0); 6492 bawrite(bp); 6493 } else if (lastoff != 0 && vp->v_type != VDIR) { 6494 int size; 6495 6496 /* 6497 * Zero the end of a truncated frag or block. 6498 */ 6499 size = sblksize(fs, length, lastlbn); 6500 error = bread(vp, lastlbn, size, cred, &bp); 6501 if (error) { 6502 softdep_error("softdep_journal_freeblks", error); 6503 return; 6504 } 6505 bzero((char *)bp->b_data + lastoff, size - lastoff); 6506 bawrite(bp); 6507 6508 } 6509 ACQUIRE_LOCK(&lk); 6510 inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6511 TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next); 6512 freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST; 6513 /* 6514 * We zero earlier truncations so they don't erroneously 6515 * update i_blocks. 6516 */ 6517 if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0) 6518 TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next) 6519 fbn->fb_len = 0; 6520 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE && 6521 LIST_EMPTY(&freeblks->fb_jblkdephd)) 6522 freeblks->fb_state |= INPROGRESS; 6523 else 6524 freeblks = NULL; 6525 FREE_LOCK(&lk); 6526 if (freeblks) 6527 handle_workitem_freeblocks(freeblks, 0); 6528 trunc_pages(ip, length, extblocks, flags); 6529 6530 } 6531 6532 /* 6533 * Flush a JOP_SYNC to the journal. 
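 * (Illustrative note, not part of the original source: this is only
 * done for inodes marked IN_TRUNCATED by a partial truncation above;
 * the jfsync record is queued and then waited on with
 * jwait(..., MNT_WAIT), so the truncation is stable in the journal
 * before the fsync returns.)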
6534 */ 6535 void 6536 softdep_journal_fsync(ip) 6537 struct inode *ip; 6538 { 6539 struct jfsync *jfsync; 6540 6541 if ((ip->i_flag & IN_TRUNCATED) == 0) 6542 return; 6543 ip->i_flag &= ~IN_TRUNCATED; 6544 jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO); 6545 workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump)); 6546 jfsync->jfs_size = ip->i_size; 6547 jfsync->jfs_ino = ip->i_number; 6548 ACQUIRE_LOCK(&lk); 6549 add_to_journal(&jfsync->jfs_list); 6550 jwait(&jfsync->jfs_list, MNT_WAIT); 6551 FREE_LOCK(&lk); 6552 } 6553 6554 /* 6555 * Block de-allocation dependencies. 6556 * 6557 * When blocks are de-allocated, the on-disk pointers must be nullified before 6558 * the blocks are made available for use by other files. (The true 6559 * requirement is that old pointers must be nullified before new on-disk 6560 * pointers are set. We chose this slightly more stringent requirement to 6561 * reduce complexity.) Our implementation handles this dependency by updating 6562 * the inode (or indirect block) appropriately but delaying the actual block 6563 * de-allocation (i.e., freemap and free space count manipulation) until 6564 * after the updated versions reach stable storage. After the disk is 6565 * updated, the blocks can be safely de-allocated whenever it is convenient. 6566 * This implementation handles only the common case of reducing a file's 6567 * length to zero. Other cases are handled by the conventional synchronous 6568 * write approach. 6569 * 6570 * The ffs implementation with which we worked double-checks 6571 * the state of the block pointers and file size as it reduces 6572 * a file's length. Some of this code is replicated here in our 6573 * soft updates implementation. The freeblks->fb_chkcnt field is 6574 * used to transfer a part of this information to the procedure 6575 * that eventually de-allocates the blocks. 6576 * 6577 * This routine should be called from the routine that shortens 6578 * a file's length, before the inode's size or block pointers 6579 * are modified. It will save the block pointer information for 6580 * later release and zero the inode so that the calling routine 6581 * can release it. 
6582 */ 6583 void 6584 softdep_setup_freeblocks(ip, length, flags) 6585 struct inode *ip; /* The inode whose length is to be reduced */ 6586 off_t length; /* The new length for the file */ 6587 int flags; /* IO_EXT and/or IO_NORMAL */ 6588 { 6589 struct ufs1_dinode *dp1; 6590 struct ufs2_dinode *dp2; 6591 struct freeblks *freeblks; 6592 struct inodedep *inodedep; 6593 struct allocdirect *adp; 6594 struct buf *bp; 6595 struct fs *fs; 6596 ufs2_daddr_t extblocks, datablocks; 6597 struct mount *mp; 6598 int i, delay, error, dflags; 6599 ufs_lbn_t tmpval; 6600 ufs_lbn_t lbn; 6601 6602 CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld", 6603 ip->i_number, length); 6604 fs = ip->i_fs; 6605 mp = UFSTOVFS(ip->i_ump); 6606 if (length != 0) 6607 panic("softdep_setup_freeblocks: non-zero length"); 6608 freeblks = newfreeblks(mp, ip); 6609 extblocks = 0; 6610 datablocks = 0; 6611 if (fs->fs_magic == FS_UFS2_MAGIC) 6612 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 6613 if ((flags & IO_NORMAL) != 0) { 6614 for (i = 0; i < NDADDR; i++) 6615 setup_freedirect(freeblks, ip, i, 0); 6616 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR; 6617 i++, lbn += tmpval, tmpval *= NINDIR(fs)) 6618 setup_freeindir(freeblks, ip, i, -lbn -i, 0); 6619 ip->i_size = 0; 6620 DIP_SET(ip, i_size, 0); 6621 datablocks = DIP(ip, i_blocks) - extblocks; 6622 } 6623 if ((flags & IO_EXT) != 0) { 6624 for (i = 0; i < NXADDR; i++) 6625 setup_freeext(freeblks, ip, i, 0); 6626 ip->i_din2->di_extsize = 0; 6627 datablocks += extblocks; 6628 } 6629 #ifdef QUOTA 6630 /* Reference the quotas in case the block count is wrong in the end. */ 6631 quotaref(ITOV(ip), freeblks->fb_quota); 6632 (void) chkdq(ip, -datablocks, NOCRED, 0); 6633 #endif 6634 freeblks->fb_chkcnt = -datablocks; 6635 UFS_LOCK(ip->i_ump); 6636 fs->fs_pendingblocks += datablocks; 6637 UFS_UNLOCK(ip->i_ump); 6638 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6639 /* 6640 * Push the zero'ed inode to its disk buffer so that we are free 6641 * to delete its dependencies below. Once the dependencies are gone 6642 * the buffer can be safely released. 6643 */ 6644 if ((error = bread(ip->i_devvp, 6645 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6646 (int)fs->fs_bsize, NOCRED, &bp)) != 0) { 6647 brelse(bp); 6648 softdep_error("softdep_setup_freeblocks", error); 6649 } 6650 if (ip->i_ump->um_fstype == UFS1) { 6651 dp1 = ((struct ufs1_dinode *)bp->b_data + 6652 ino_to_fsbo(fs, ip->i_number)); 6653 ip->i_din1->di_freelink = dp1->di_freelink; 6654 *dp1 = *ip->i_din1; 6655 } else { 6656 dp2 = ((struct ufs2_dinode *)bp->b_data + 6657 ino_to_fsbo(fs, ip->i_number)); 6658 ip->i_din2->di_freelink = dp2->di_freelink; 6659 *dp2 = *ip->i_din2; 6660 } 6661 /* 6662 * Find and eliminate any inode dependencies. 6663 */ 6664 ACQUIRE_LOCK(&lk); 6665 dflags = DEPALLOC; 6666 if (IS_SNAPSHOT(ip)) 6667 dflags |= NODELAY; 6668 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6669 if ((inodedep->id_state & IOSTARTED) != 0) 6670 panic("softdep_setup_freeblocks: inode busy"); 6671 /* 6672 * Add the freeblks structure to the list of operations that 6673 * must await the zero'ed inode being written to disk. If we 6674 * still have a bitmap dependency (delay == 0), then the inode 6675 * has never been written to disk, so we can process the 6676 * freeblks below once we have deleted the dependencies.
6677 */ 6678 delay = (inodedep->id_state & DEPCOMPLETE); 6679 if (delay) 6680 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 6681 else 6682 freeblks->fb_state |= COMPLETE; 6683 /* 6684 * Because the file length has been truncated to zero, any 6685 * pending block allocation dependency structures associated 6686 * with this inode are obsolete and can simply be de-allocated. 6687 * We must first merge the two dependency lists to get rid of 6688 * any duplicate freefrag structures, then purge the merged list. 6689 * If we still have a bitmap dependency, then the inode has never 6690 * been written to disk, so we can free any fragments without delay. 6691 */ 6692 if (flags & IO_NORMAL) { 6693 merge_inode_lists(&inodedep->id_newinoupdt, 6694 &inodedep->id_inoupdt); 6695 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 6696 cancel_allocdirect(&inodedep->id_inoupdt, adp, 6697 freeblks); 6698 } 6699 if (flags & IO_EXT) { 6700 merge_inode_lists(&inodedep->id_newextupdt, 6701 &inodedep->id_extupdt); 6702 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 6703 cancel_allocdirect(&inodedep->id_extupdt, adp, 6704 freeblks); 6705 } 6706 FREE_LOCK(&lk); 6707 bdwrite(bp); 6708 trunc_dependencies(ip, freeblks, -1, 0, flags); 6709 ACQUIRE_LOCK(&lk); 6710 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) 6711 (void) free_inodedep(inodedep); 6712 freeblks->fb_state |= DEPCOMPLETE; 6713 /* 6714 * If the inode with zeroed block pointers is now on disk 6715 * we can start freeing blocks. 6716 */ 6717 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 6718 freeblks->fb_state |= INPROGRESS; 6719 else 6720 freeblks = NULL; 6721 FREE_LOCK(&lk); 6722 if (freeblks) 6723 handle_workitem_freeblocks(freeblks, 0); 6724 trunc_pages(ip, length, extblocks, flags); 6725 } 6726 6727 /* 6728 * Eliminate pages from the page cache that back parts of this inode and 6729 * adjust the vnode pager's idea of our size. This prevents stale data 6730 * from hanging around in the page cache. 6731 */ 6732 static void 6733 trunc_pages(ip, length, extblocks, flags) 6734 struct inode *ip; 6735 off_t length; 6736 ufs2_daddr_t extblocks; 6737 int flags; 6738 { 6739 struct vnode *vp; 6740 struct fs *fs; 6741 ufs_lbn_t lbn; 6742 off_t end, extend; 6743 6744 vp = ITOV(ip); 6745 fs = ip->i_fs; 6746 extend = OFF_TO_IDX(lblktosize(fs, -extblocks)); 6747 if ((flags & IO_EXT) != 0) 6748 vn_pages_remove(vp, extend, 0); 6749 if ((flags & IO_NORMAL) == 0) 6750 return; 6751 BO_LOCK(&vp->v_bufobj); 6752 drain_output(vp); 6753 BO_UNLOCK(&vp->v_bufobj); 6754 /* 6755 * The vnode pager eliminates file pages; we eliminate indirects 6756 * below. 6757 */ 6758 vnode_pager_setsize(vp, length); 6759 /* 6760 * Calculate the end based on the last indirect we want to keep. If 6761 * the block extends into indirects we can just use the negative of 6762 * its lbn. Doubles and triples exist at lower numbers so we must 6763 * be careful not to remove those, if they exist. Double and triple 6764 * indirect lbns do not overlap with others so it is not important 6765 * to verify how many levels are required. 6766 */ 6767 lbn = lblkno(fs, length); 6768 if (lbn >= NDADDR) { 6769 /* Calculate the virtual lbn of the triple indirect. */ 6770 lbn = -lbn - (NIADDR - 1); 6771 end = OFF_TO_IDX(lblktosize(fs, lbn)); 6772 } else 6773 end = extend; 6774 vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end); 6775 } 6776 6777 /* 6778 * See if the buf bp is in the range eliminated by truncation.
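 * (Illustrative note, not part of the original source: the routine
 * returns 1 when the buffer is affected by the truncation and 0 when
 * it should be left alone; for a partially truncated last block it
 * also sets *blkoffp to the byte offset that survives.)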
6779 */ 6780 static int 6781 trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags) 6782 struct buf *bp; 6783 int *blkoffp; 6784 ufs_lbn_t lastlbn; 6785 int lastoff; 6786 int flags; 6787 { 6788 ufs_lbn_t lbn; 6789 6790 *blkoffp = 0; 6791 /* Only match ext/normal blocks as appropriate. */ 6792 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || 6793 ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0)) 6794 return (0); 6795 /* ALTDATA is always a full truncation. */ 6796 if ((bp->b_xflags & BX_ALTDATA) != 0) 6797 return (1); 6798 /* -1 is full truncation. */ 6799 if (lastlbn == -1) 6800 return (1); 6801 /* 6802 * If this is a partial truncate we only want those 6803 * blocks and indirect blocks that cover the range 6804 * we're after. 6805 */ 6806 lbn = bp->b_lblkno; 6807 if (lbn < 0) 6808 lbn = -(lbn + lbn_level(lbn)); 6809 if (lbn < lastlbn) 6810 return (0); 6811 /* Here we only truncate lblkno if it's partial. */ 6812 if (lbn == lastlbn) { 6813 if (lastoff == 0) 6814 return (0); 6815 *blkoffp = lastoff; 6816 } 6817 return (1); 6818 } 6819 6820 /* 6821 * Eliminate any dependencies that exist in memory beyond lblkno:off 6822 */ 6823 static void 6824 trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags) 6825 struct inode *ip; 6826 struct freeblks *freeblks; 6827 ufs_lbn_t lastlbn; 6828 int lastoff; 6829 int flags; 6830 { 6831 struct bufobj *bo; 6832 struct vnode *vp; 6833 struct buf *bp; 6834 struct fs *fs; 6835 int blkoff; 6836 6837 /* 6838 * We must wait for any I/O in progress to finish so that 6839 * all potential buffers on the dirty list will be visible. 6840 * Once they are all there, walk the list and get rid of 6841 * any dependencies. 6842 */ 6843 fs = ip->i_fs; 6844 vp = ITOV(ip); 6845 bo = &vp->v_bufobj; 6846 BO_LOCK(bo); 6847 drain_output(vp); 6848 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) 6849 bp->b_vflags &= ~BV_SCANNED; 6850 restart: 6851 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { 6852 if (bp->b_vflags & BV_SCANNED) 6853 continue; 6854 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 6855 bp->b_vflags |= BV_SCANNED; 6856 continue; 6857 } 6858 if ((bp = getdirtybuf(bp, BO_MTX(bo), MNT_WAIT)) == NULL) 6859 goto restart; 6860 BO_UNLOCK(bo); 6861 if (deallocate_dependencies(bp, freeblks, blkoff)) 6862 bqrelse(bp); 6863 else 6864 brelse(bp); 6865 BO_LOCK(bo); 6866 goto restart; 6867 } 6868 /* 6869 * Now do the work of vtruncbuf while also matching indirect blocks. 
6870 */ 6871 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) 6872 bp->b_vflags &= ~BV_SCANNED; 6873 cleanrestart: 6874 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) { 6875 if (bp->b_vflags & BV_SCANNED) 6876 continue; 6877 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 6878 bp->b_vflags |= BV_SCANNED; 6879 continue; 6880 } 6881 if (BUF_LOCK(bp, 6882 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 6883 BO_MTX(bo)) == ENOLCK) { 6884 BO_LOCK(bo); 6885 goto cleanrestart; 6886 } 6887 bp->b_vflags |= BV_SCANNED; 6888 BO_LOCK(bo); 6889 bremfree(bp); 6890 BO_UNLOCK(bo); 6891 if (blkoff != 0) { 6892 allocbuf(bp, blkoff); 6893 bqrelse(bp); 6894 } else { 6895 bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF; 6896 brelse(bp); 6897 } 6898 BO_LOCK(bo); 6899 goto cleanrestart; 6900 } 6901 drain_output(vp); 6902 BO_UNLOCK(bo); 6903 } 6904 6905 static int 6906 cancel_pagedep(pagedep, freeblks, blkoff) 6907 struct pagedep *pagedep; 6908 struct freeblks *freeblks; 6909 int blkoff; 6910 { 6911 struct jremref *jremref; 6912 struct jmvref *jmvref; 6913 struct dirrem *dirrem, *tmp; 6914 int i; 6915 6916 /* 6917 * Copy any directory remove dependencies to the list 6918 * to be processed after the freeblks proceeds. If the 6919 * directory entries never made it to disk they 6920 * can be dumped directly onto the work list. 6921 */ 6922 LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) { 6923 /* Skip this directory removal if it is intended to remain. */ 6924 if (dirrem->dm_offset < blkoff) 6925 continue; 6926 /* 6927 * If there are any dirrems we wait for the journal write 6928 * to complete and then restart the buf scan as the lock 6929 * has been dropped. 6930 */ 6931 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) { 6932 jwait(&jremref->jr_list, MNT_WAIT); 6933 return (ERESTART); 6934 } 6935 LIST_REMOVE(dirrem, dm_next); 6936 dirrem->dm_dirinum = pagedep->pd_ino; 6937 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list); 6938 } 6939 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) { 6940 jwait(&jmvref->jm_list, MNT_WAIT); 6941 return (ERESTART); 6942 } 6943 /* 6944 * When we're partially truncating a pagedep we just want to flush 6945 * journal entries and return. There cannot be any adds in the 6946 * truncated portion of the directory and newblk must remain if 6947 * part of the block remains. 6948 */ 6949 if (blkoff != 0) { 6950 struct diradd *dap; 6951 6952 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 6953 if (dap->da_offset > blkoff) 6954 panic("cancel_pagedep: diradd %p off %d > %d", 6955 dap, dap->da_offset, blkoff); 6956 for (i = 0; i < DAHASHSZ; i++) 6957 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) 6958 if (dap->da_offset > blkoff) 6959 panic("cancel_pagedep: diradd %p off %d > %d", 6960 dap, dap->da_offset, blkoff); 6961 return (0); 6962 } 6963 /* 6964 * There should be no directory add dependencies present 6965 * as the directory could not be truncated until all 6966 * children were removed.
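 * (Illustrative note, not part of the original source: a directory
 * page is only freed once every entry in it is gone, so the KASSERTs
 * below verify that both the pending list and the hashed diradd lists
 * are empty before the pagedep itself is freed.)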
6967 */ 6968 KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL, 6969 ("deallocate_dependencies: pendinghd != NULL")); 6970 for (i = 0; i < DAHASHSZ; i++) 6971 KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL, 6972 ("deallocate_dependencies: diraddhd != NULL")); 6973 if ((pagedep->pd_state & NEWBLOCK) != 0) 6974 free_newdirblk(pagedep->pd_newdirblk); 6975 if (free_pagedep(pagedep) == 0) 6976 panic("Failed to free pagedep %p", pagedep); 6977 return (0); 6978 } 6979 6980 /* 6981 * Reclaim any dependency structures from a buffer that is about to 6982 * be reallocated to a new vnode. The buffer must be locked, thus, 6983 * no I/O completion operations can occur while we are manipulating 6984 * its associated dependencies. The mutex is held so that other I/O's 6985 * associated with related dependencies do not occur. 6986 */ 6987 static int 6988 deallocate_dependencies(bp, freeblks, off) 6989 struct buf *bp; 6990 struct freeblks *freeblks; 6991 int off; 6992 { 6993 struct indirdep *indirdep; 6994 struct pagedep *pagedep; 6995 struct allocdirect *adp; 6996 struct worklist *wk, *wkn; 6997 6998 ACQUIRE_LOCK(&lk); 6999 LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) { 7000 switch (wk->wk_type) { 7001 case D_INDIRDEP: 7002 indirdep = WK_INDIRDEP(wk); 7003 if (bp->b_lblkno >= 0 || 7004 bp->b_blkno != indirdep->ir_savebp->b_lblkno) 7005 panic("deallocate_dependencies: not indir"); 7006 cancel_indirdep(indirdep, bp, freeblks); 7007 continue; 7008 7009 case D_PAGEDEP: 7010 pagedep = WK_PAGEDEP(wk); 7011 if (cancel_pagedep(pagedep, freeblks, off)) { 7012 FREE_LOCK(&lk); 7013 return (ERESTART); 7014 } 7015 continue; 7016 7017 case D_ALLOCINDIR: 7018 /* 7019 * Simply remove the allocindir, we'll find it via 7020 * the indirdep where we can clear pointers if 7021 * needed. 7022 */ 7023 WORKLIST_REMOVE(wk); 7024 continue; 7025 7026 case D_FREEWORK: 7027 /* 7028 * A truncation is waiting for the zero'd pointers 7029 * to be written. It can be freed when the freeblks 7030 * is journaled. 7031 */ 7032 WORKLIST_REMOVE(wk); 7033 wk->wk_state |= ONDEPLIST; 7034 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 7035 break; 7036 7037 case D_ALLOCDIRECT: 7038 adp = WK_ALLOCDIRECT(wk); 7039 if (off != 0) 7040 continue; 7041 /* FALLTHROUGH */ 7042 default: 7043 panic("deallocate_dependencies: Unexpected type %s", 7044 TYPENAME(wk->wk_type)); 7045 /* NOTREACHED */ 7046 } 7047 } 7048 FREE_LOCK(&lk); 7049 /* 7050 * Don't throw away this buf, we were partially truncating and 7051 * some deps may always remain. 7052 */ 7053 if (off) { 7054 allocbuf(bp, off); 7055 bp->b_vflags |= BV_SCANNED; 7056 return (EBUSY); 7057 } 7058 bp->b_flags |= B_INVAL | B_NOCACHE; 7059 7060 return (0); 7061 } 7062 7063 /* 7064 * An allocdirect is being canceled due to a truncate. We must make sure 7065 * the journal entry is released in concert with the blkfree that releases 7066 * the storage. Completed journal entries must not be released until the 7067 * space is no longer pointed to by the inode or in the bitmap. 7068 */ 7069 static void 7070 cancel_allocdirect(adphead, adp, freeblks) 7071 struct allocdirectlst *adphead; 7072 struct allocdirect *adp; 7073 struct freeblks *freeblks; 7074 { 7075 struct freework *freework; 7076 struct newblk *newblk; 7077 struct worklist *wk; 7078 7079 TAILQ_REMOVE(adphead, adp, ad_next); 7080 newblk = (struct newblk *)adp; 7081 freework = NULL; 7082 /* 7083 * Find the correct freework structure. 
7084 */ 7085 LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) { 7086 if (wk->wk_type != D_FREEWORK) 7087 continue; 7088 freework = WK_FREEWORK(wk); 7089 if (freework->fw_blkno == newblk->nb_newblkno) 7090 break; 7091 } 7092 if (freework == NULL) 7093 panic("cancel_allocdirect: Freework not found"); 7094 /* 7095 * If a newblk exists at all we still have the journal entry that 7096 * initiated the allocation so we do not need to journal the free. 7097 */ 7098 cancel_jfreeblk(freeblks, freework->fw_blkno); 7099 /* 7100 * If the journal hasn't been written the jnewblk must be passed 7101 * to the call to ffs_blkfree that reclaims the space. We accomplish 7102 * this by linking the journal dependency into the freework to be 7103 * freed when freework_freeblock() is called. If the journal has 7104 * been written we can simply reclaim the journal space when the 7105 * freeblks work is complete. 7106 */ 7107 freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list, 7108 &freeblks->fb_jwork); 7109 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 7110 } 7111 7112 7113 /* 7114 * Cancel a new block allocation. May be an indirect or direct block. We 7115 * remove it from various lists and return any journal record that needs to 7116 * be resolved by the caller. 7117 * 7118 * A special consideration is made for indirects which were never pointed 7119 * at on disk and will never be found once this block is released. 7120 */ 7121 static struct jnewblk * 7122 cancel_newblk(newblk, wk, wkhd) 7123 struct newblk *newblk; 7124 struct worklist *wk; 7125 struct workhead *wkhd; 7126 { 7127 struct jnewblk *jnewblk; 7128 7129 CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno); 7130 7131 newblk->nb_state |= GOINGAWAY; 7132 /* 7133 * Previously we traversed the completedhd on each indirdep 7134 * attached to this newblk to cancel them and gather journal 7135 * work. Since we need only the oldest journal segment and 7136 * the lowest point on the tree will always have the oldest 7137 * journal segment we are free to release the segments 7138 * of any subordinates and may leave the indirdep list to 7139 * indirdep_complete() when this newblk is freed. 7140 */ 7141 if (newblk->nb_state & ONDEPLIST) { 7142 newblk->nb_state &= ~ONDEPLIST; 7143 LIST_REMOVE(newblk, nb_deps); 7144 } 7145 if (newblk->nb_state & ONWORKLIST) 7146 WORKLIST_REMOVE(&newblk->nb_list); 7147 /* 7148 * If the journal entry hasn't been written we save a pointer to 7149 * the dependency that frees it until it is written or the 7150 * superseding operation completes. 7151 */ 7152 jnewblk = newblk->nb_jnewblk; 7153 if (jnewblk != NULL && wk != NULL) { 7154 newblk->nb_jnewblk = NULL; 7155 jnewblk->jn_dep = wk; 7156 } 7157 if (!LIST_EMPTY(&newblk->nb_jwork)) 7158 jwork_move(wkhd, &newblk->nb_jwork); 7159 /* 7160 * When truncating we must free the newdirblk early to remove 7161 * the pagedep from the hash before returning. 7162 */ 7163 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7164 free_newdirblk(WK_NEWDIRBLK(wk)); 7165 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7166 panic("cancel_newblk: extra newdirblk"); 7167 7168 return (jnewblk); 7169 } 7170 7171 /* 7172 * Schedule the freefrag associated with a newblk to be released once 7173 * the pointers are written and the previous block is no longer needed. 
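 * (Illustrative note, not part of the original source: the freefrag is
 * marked COMPLETE here and is queued on the work list only once it is
 * ALLCOMPLETE, i.e. after its journal dependencies have also been
 * satisfied.)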
7174 */ 7175 static void 7176 newblk_freefrag(newblk) 7177 struct newblk *newblk; 7178 { 7179 struct freefrag *freefrag; 7180 7181 if (newblk->nb_freefrag == NULL) 7182 return; 7183 freefrag = newblk->nb_freefrag; 7184 newblk->nb_freefrag = NULL; 7185 freefrag->ff_state |= COMPLETE; 7186 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 7187 add_to_worklist(&freefrag->ff_list, 0); 7188 } 7189 7190 /* 7191 * Free a newblk. Generate a new freefrag work request if appropriate. 7192 * This must be called after the inode pointer and any direct block pointers 7193 * are valid or fully removed via truncate or frag extension. 7194 */ 7195 static void 7196 free_newblk(newblk) 7197 struct newblk *newblk; 7198 { 7199 struct indirdep *indirdep; 7200 struct worklist *wk; 7201 7202 KASSERT(newblk->nb_jnewblk == NULL, 7203 ("free_newblk; jnewblk %p still attached", newblk->nb_jnewblk)); 7204 mtx_assert(&lk, MA_OWNED); 7205 newblk_freefrag(newblk); 7206 if (newblk->nb_state & ONDEPLIST) 7207 LIST_REMOVE(newblk, nb_deps); 7208 if (newblk->nb_state & ONWORKLIST) 7209 WORKLIST_REMOVE(&newblk->nb_list); 7210 LIST_REMOVE(newblk, nb_hash); 7211 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7212 free_newdirblk(WK_NEWDIRBLK(wk)); 7213 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7214 panic("free_newblk: extra newdirblk"); 7215 while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL) 7216 indirdep_complete(indirdep); 7217 handle_jwork(&newblk->nb_jwork); 7218 newblk->nb_list.wk_type = D_NEWBLK; 7219 WORKITEM_FREE(newblk, D_NEWBLK); 7220 } 7221 7222 /* 7223 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 7224 * This routine must be called with splbio interrupts blocked. 7225 */ 7226 static void 7227 free_newdirblk(newdirblk) 7228 struct newdirblk *newdirblk; 7229 { 7230 struct pagedep *pagedep; 7231 struct diradd *dap; 7232 struct worklist *wk; 7233 7234 mtx_assert(&lk, MA_OWNED); 7235 WORKLIST_REMOVE(&newdirblk->db_list); 7236 /* 7237 * If the pagedep is still linked onto the directory buffer 7238 * dependency chain, then some of the entries on the 7239 * pd_pendinghd list may not be committed to disk yet. In 7240 * this case, we will simply clear the NEWBLOCK flag and 7241 * let the pd_pendinghd list be processed when the pagedep 7242 * is next written. If the pagedep is no longer on the buffer 7243 * dependency chain, then all the entries on the pd_pending 7244 * list are committed to disk and we can free them here. 7245 */ 7246 pagedep = newdirblk->db_pagedep; 7247 pagedep->pd_state &= ~NEWBLOCK; 7248 if ((pagedep->pd_state & ONWORKLIST) == 0) { 7249 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 7250 free_diradd(dap, NULL); 7251 /* 7252 * If no dependencies remain, the pagedep will be freed. 7253 */ 7254 free_pagedep(pagedep); 7255 } 7256 /* Should only ever be one item in the list. */ 7257 while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) { 7258 WORKLIST_REMOVE(wk); 7259 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 7260 } 7261 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 7262 } 7263 7264 /* 7265 * Prepare an inode to be freed. The actual free operation is not 7266 * done until the zero'ed inode has been written to disk. 7267 */ 7268 void 7269 softdep_freefile(pvp, ino, mode) 7270 struct vnode *pvp; 7271 ino_t ino; 7272 int mode; 7273 { 7274 struct inode *ip = VTOI(pvp); 7275 struct inodedep *inodedep; 7276 struct freefile *freefile; 7277 struct freeblks *freeblks; 7278 7279 /* 7280 * This sets up the inode de-allocation dependency. 
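 * (Illustrative note, not part of the original source: the freefile
 * work item records the mode, inode number, and device so that the
 * actual ffs_freefile() call can be issued later, once the zero'ed
 * on-disk inode is known to be stable.)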
7281 	 */
7282 	freefile = malloc(sizeof(struct freefile),
7283 	    M_FREEFILE, M_SOFTDEP_FLAGS);
7284 	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7285 	freefile->fx_mode = mode;
7286 	freefile->fx_oldinum = ino;
7287 	freefile->fx_devvp = ip->i_devvp;
7288 	LIST_INIT(&freefile->fx_jwork);
7289 	UFS_LOCK(ip->i_ump);
7290 	ip->i_fs->fs_pendinginodes += 1;
7291 	UFS_UNLOCK(ip->i_ump);
7292 
7293 	/*
7294 	 * If the inodedep does not exist, then the zero'ed inode has
7295 	 * been written to disk. If the allocated inode has never been
7296 	 * written to disk, then the on-disk inode is zero'ed. In either
7297 	 * case we can free the file immediately. If the journal was
7298 	 * canceled before being written the inode will never make it to
7299 	 * disk and we must send the canceled journal entries to
7300 	 * ffs_freefile() to be cleared in conjunction with the bitmap.
7301 	 * Any blocks waiting on the inode to write can be safely freed
7302 	 * here as it will never be written.
7303 	 */
7304 	ACQUIRE_LOCK(&lk);
7305 	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7306 	if (inodedep) {
7307 		/*
7308 		 * Clear out freeblks that no longer need to reference
7309 		 * this inode.
7310 		 */
7311 		while ((freeblks =
7312 		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7313 			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7314 			    fb_next);
7315 			freeblks->fb_state &= ~ONDEPLIST;
7316 		}
7317 		/*
7318 		 * Remove this inode from the unlinked list.
7319 		 */
7320 		if (inodedep->id_state & UNLINKED) {
7321 			/*
7322 			 * Save the journal work to be freed with the bitmap
7323 			 * before we clear UNLINKED. Otherwise it can be lost
7324 			 * if the inode block is written.
7325 			 */
7326 			handle_bufwait(inodedep, &freefile->fx_jwork);
7327 			clear_unlinked_inodedep(inodedep);
7328 			/* Re-acquire inodedep as we've dropped lk. */
7329 			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7330 		}
7331 	}
7332 	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7333 		FREE_LOCK(&lk);
7334 		handle_workitem_freefile(freefile);
7335 		return;
7336 	}
7337 	if ((inodedep->id_state & DEPCOMPLETE) == 0)
7338 		inodedep->id_state |= GOINGAWAY;
7339 	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7340 	FREE_LOCK(&lk);
7341 	if (ip->i_number == ino)
7342 		ip->i_flag |= IN_MODIFIED;
7343 }
7344 
7345 /*
7346  * Check to see if an inode has never been written to disk. If
7347  * so, free the inodedep and return success, otherwise return failure.
7348  * This routine must be called with splbio interrupts blocked.
7349  *
7350  * If we still have a bitmap dependency, then the inode has never
7351  * been written to disk. Drop the dependency as it is no longer
7352  * necessary since the inode is being deallocated. We set the
7353  * ALLCOMPLETE flags since the bitmap now properly shows that the
7354  * inode is not allocated. Even if the inode is actively being
7355  * written, it has been rolled back to its zero'ed state, so we
7356  * are ensured that a zero inode is what is on the disk. For short
7357  * lived files, this change will usually result in removing all the
7358  * dependencies from the inode so that it can be freed immediately.
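 *
 * A typical caller is softdep_freefile() above, where success lets the
 * freefile work item run at once instead of waiting on id_inowait:
 *
 *	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
 *		FREE_LOCK(&lk);
 *		handle_workitem_freefile(freefile);
 *		return;
 *	}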
7359 */ 7360 static int 7361 check_inode_unwritten(inodedep) 7362 struct inodedep *inodedep; 7363 { 7364 7365 mtx_assert(&lk, MA_OWNED); 7366 7367 if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 || 7368 !LIST_EMPTY(&inodedep->id_dirremhd) || 7369 !LIST_EMPTY(&inodedep->id_pendinghd) || 7370 !LIST_EMPTY(&inodedep->id_bufwait) || 7371 !LIST_EMPTY(&inodedep->id_inowait) || 7372 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7373 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7374 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7375 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7376 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7377 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7378 inodedep->id_mkdiradd != NULL || 7379 inodedep->id_nlinkdelta != 0) 7380 return (0); 7381 /* 7382 * Another process might be in initiate_write_inodeblock_ufs[12] 7383 * trying to allocate memory without holding "Softdep Lock". 7384 */ 7385 if ((inodedep->id_state & IOSTARTED) != 0 && 7386 inodedep->id_savedino1 == NULL) 7387 return (0); 7388 7389 if (inodedep->id_state & ONDEPLIST) 7390 LIST_REMOVE(inodedep, id_deps); 7391 inodedep->id_state &= ~ONDEPLIST; 7392 inodedep->id_state |= ALLCOMPLETE; 7393 inodedep->id_bmsafemap = NULL; 7394 if (inodedep->id_state & ONWORKLIST) 7395 WORKLIST_REMOVE(&inodedep->id_list); 7396 if (inodedep->id_savedino1 != NULL) { 7397 free(inodedep->id_savedino1, M_SAVEDINO); 7398 inodedep->id_savedino1 = NULL; 7399 } 7400 if (free_inodedep(inodedep) == 0) 7401 panic("check_inode_unwritten: busy inode"); 7402 return (1); 7403 } 7404 7405 /* 7406 * Try to free an inodedep structure. Return 1 if it could be freed. 7407 */ 7408 static int 7409 free_inodedep(inodedep) 7410 struct inodedep *inodedep; 7411 { 7412 7413 mtx_assert(&lk, MA_OWNED); 7414 if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 || 7415 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 7416 !LIST_EMPTY(&inodedep->id_dirremhd) || 7417 !LIST_EMPTY(&inodedep->id_pendinghd) || 7418 !LIST_EMPTY(&inodedep->id_bufwait) || 7419 !LIST_EMPTY(&inodedep->id_inowait) || 7420 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7421 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7422 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7423 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7424 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7425 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7426 inodedep->id_mkdiradd != NULL || 7427 inodedep->id_nlinkdelta != 0 || 7428 inodedep->id_savedino1 != NULL) 7429 return (0); 7430 if (inodedep->id_state & ONDEPLIST) 7431 LIST_REMOVE(inodedep, id_deps); 7432 LIST_REMOVE(inodedep, id_hash); 7433 WORKITEM_FREE(inodedep, D_INODEDEP); 7434 return (1); 7435 } 7436 7437 /* 7438 * Free the block referenced by a freework structure. The parent freeblks 7439 * structure is released and completed when the final cg bitmap reaches 7440 * the disk. This routine may be freeing a jnewblk which never made it to 7441 * disk in which case we do not have to wait as the operation is undone 7442 * in memory immediately. 7443 */ 7444 static void 7445 freework_freeblock(freework) 7446 struct freework *freework; 7447 { 7448 struct freeblks *freeblks; 7449 struct jnewblk *jnewblk; 7450 struct ufsmount *ump; 7451 struct workhead wkhd; 7452 struct fs *fs; 7453 int bsize; 7454 int needj; 7455 7456 mtx_assert(&lk, MA_OWNED); 7457 /* 7458 * Handle partial truncate separately. 
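	 * A freework created by a partial truncate is recognizable by its
	 * non-NULL fw_indir and is finished through complete_trunc_indir()
	 * rather than the whole-block path below.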
7459 */ 7460 if (freework->fw_indir) { 7461 complete_trunc_indir(freework); 7462 return; 7463 } 7464 freeblks = freework->fw_freeblks; 7465 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7466 fs = ump->um_fs; 7467 needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0; 7468 bsize = lfragtosize(fs, freework->fw_frags); 7469 LIST_INIT(&wkhd); 7470 /* 7471 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives 7472 * on the indirblk hashtable and prevents premature freeing. 7473 */ 7474 freework->fw_state |= DEPCOMPLETE; 7475 /* 7476 * SUJ needs to wait for the segment referencing freed indirect 7477 * blocks to expire so that we know the checker will not confuse 7478 * a re-allocated indirect block with its old contents. 7479 */ 7480 if (needj && freework->fw_lbn <= -NDADDR) 7481 indirblk_insert(freework); 7482 /* 7483 * If we are canceling an existing jnewblk pass it to the free 7484 * routine, otherwise pass the freeblk which will ultimately 7485 * release the freeblks. If we're not journaling, we can just 7486 * free the freeblks immediately. 7487 */ 7488 jnewblk = freework->fw_jnewblk; 7489 if (jnewblk != NULL) { 7490 cancel_jnewblk(jnewblk, &wkhd); 7491 needj = 0; 7492 } else if (needj) { 7493 freework->fw_state |= DELAYEDFREE; 7494 freeblks->fb_cgwait++; 7495 WORKLIST_INSERT(&wkhd, &freework->fw_list); 7496 } 7497 FREE_LOCK(&lk); 7498 freeblks_free(ump, freeblks, btodb(bsize)); 7499 CTR4(KTR_SUJ, 7500 "freework_freeblock: ino %d blkno %jd lbn %jd size %ld", 7501 freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize); 7502 ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize, 7503 freeblks->fb_inum, freeblks->fb_vtype, &wkhd); 7504 ACQUIRE_LOCK(&lk); 7505 /* 7506 * The jnewblk will be discarded and the bits in the map never 7507 * made it to disk. We can immediately free the freeblk. 7508 */ 7509 if (needj == 0) 7510 handle_written_freework(freework); 7511 } 7512 7513 /* 7514 * We enqueue freework items that need processing back on the freeblks and 7515 * add the freeblks to the worklist. This makes it easier to find all work 7516 * required to flush a truncation in process_truncates(). 7517 */ 7518 static void 7519 freework_enqueue(freework) 7520 struct freework *freework; 7521 { 7522 struct freeblks *freeblks; 7523 7524 freeblks = freework->fw_freeblks; 7525 if ((freework->fw_state & INPROGRESS) == 0) 7526 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 7527 if ((freeblks->fb_state & 7528 (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE && 7529 LIST_EMPTY(&freeblks->fb_jblkdephd)) 7530 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7531 } 7532 7533 /* 7534 * Start, continue, or finish the process of freeing an indirect block tree. 7535 * The free operation may be paused at any point with fw_off containing the 7536 * offset to restart from. This enables us to implement some flow control 7537 * for large truncates which may fan out and generate a huge number of 7538 * dependencies. 
7539 */ 7540 static void 7541 handle_workitem_indirblk(freework) 7542 struct freework *freework; 7543 { 7544 struct freeblks *freeblks; 7545 struct ufsmount *ump; 7546 struct fs *fs; 7547 7548 freeblks = freework->fw_freeblks; 7549 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7550 fs = ump->um_fs; 7551 if (freework->fw_state & DEPCOMPLETE) { 7552 handle_written_freework(freework); 7553 return; 7554 } 7555 if (freework->fw_off == NINDIR(fs)) { 7556 freework_freeblock(freework); 7557 return; 7558 } 7559 freework->fw_state |= INPROGRESS; 7560 FREE_LOCK(&lk); 7561 indir_trunc(freework, fsbtodb(fs, freework->fw_blkno), 7562 freework->fw_lbn); 7563 ACQUIRE_LOCK(&lk); 7564 } 7565 7566 /* 7567 * Called when a freework structure attached to a cg buf is written. The 7568 * ref on either the parent or the freeblks structure is released and 7569 * the freeblks is added back to the worklist if there is more work to do. 7570 */ 7571 static void 7572 handle_written_freework(freework) 7573 struct freework *freework; 7574 { 7575 struct freeblks *freeblks; 7576 struct freework *parent; 7577 7578 freeblks = freework->fw_freeblks; 7579 parent = freework->fw_parent; 7580 if (freework->fw_state & DELAYEDFREE) 7581 freeblks->fb_cgwait--; 7582 freework->fw_state |= COMPLETE; 7583 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 7584 WORKITEM_FREE(freework, D_FREEWORK); 7585 if (parent) { 7586 if (--parent->fw_ref == 0) 7587 freework_enqueue(parent); 7588 return; 7589 } 7590 if (--freeblks->fb_ref != 0) 7591 return; 7592 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) == 7593 ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) 7594 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7595 } 7596 7597 /* 7598 * This workitem routine performs the block de-allocation. 7599 * The workitem is added to the pending list after the updated 7600 * inode block has been written to disk. As mentioned above, 7601 * checks regarding the number of blocks de-allocated (compared 7602 * to the number of blocks allocated for the file) are also 7603 * performed in this function. 
7604 */ 7605 static int 7606 handle_workitem_freeblocks(freeblks, flags) 7607 struct freeblks *freeblks; 7608 int flags; 7609 { 7610 struct freework *freework; 7611 struct newblk *newblk; 7612 struct allocindir *aip; 7613 struct ufsmount *ump; 7614 struct worklist *wk; 7615 7616 KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd), 7617 ("handle_workitem_freeblocks: Journal entries not written.")); 7618 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7619 ACQUIRE_LOCK(&lk); 7620 while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) { 7621 WORKLIST_REMOVE(wk); 7622 switch (wk->wk_type) { 7623 case D_DIRREM: 7624 wk->wk_state |= COMPLETE; 7625 add_to_worklist(wk, 0); 7626 continue; 7627 7628 case D_ALLOCDIRECT: 7629 free_newblk(WK_NEWBLK(wk)); 7630 continue; 7631 7632 case D_ALLOCINDIR: 7633 aip = WK_ALLOCINDIR(wk); 7634 freework = NULL; 7635 if (aip->ai_state & DELAYEDFREE) { 7636 FREE_LOCK(&lk); 7637 freework = newfreework(ump, freeblks, NULL, 7638 aip->ai_lbn, aip->ai_newblkno, 7639 ump->um_fs->fs_frag, 0, 0); 7640 ACQUIRE_LOCK(&lk); 7641 } 7642 newblk = WK_NEWBLK(wk); 7643 if (newblk->nb_jnewblk) { 7644 freework->fw_jnewblk = newblk->nb_jnewblk; 7645 newblk->nb_jnewblk->jn_dep = &freework->fw_list; 7646 newblk->nb_jnewblk = NULL; 7647 } 7648 free_newblk(newblk); 7649 continue; 7650 7651 case D_FREEWORK: 7652 freework = WK_FREEWORK(wk); 7653 if (freework->fw_lbn <= -NDADDR) 7654 handle_workitem_indirblk(freework); 7655 else 7656 freework_freeblock(freework); 7657 continue; 7658 default: 7659 panic("handle_workitem_freeblocks: Unknown type %s", 7660 TYPENAME(wk->wk_type)); 7661 } 7662 } 7663 if (freeblks->fb_ref != 0) { 7664 freeblks->fb_state &= ~INPROGRESS; 7665 wake_worklist(&freeblks->fb_list); 7666 freeblks = NULL; 7667 } 7668 FREE_LOCK(&lk); 7669 if (freeblks) 7670 return handle_complete_freeblocks(freeblks, flags); 7671 return (0); 7672 } 7673 7674 /* 7675 * Handle completion of block free via truncate. This allows fs_pending 7676 * to track the actual free block count more closely than if we only updated 7677 * it at the end. We must be careful to handle cases where the block count 7678 * on free was incorrect. 7679 */ 7680 static void 7681 freeblks_free(ump, freeblks, blocks) 7682 struct ufsmount *ump; 7683 struct freeblks *freeblks; 7684 int blocks; 7685 { 7686 struct fs *fs; 7687 ufs2_daddr_t remain; 7688 7689 UFS_LOCK(ump); 7690 remain = -freeblks->fb_chkcnt; 7691 freeblks->fb_chkcnt += blocks; 7692 if (remain > 0) { 7693 if (remain < blocks) 7694 blocks = remain; 7695 fs = ump->um_fs; 7696 fs->fs_pendingblocks -= blocks; 7697 } 7698 UFS_UNLOCK(ump); 7699 } 7700 7701 /* 7702 * Once all of the freework workitems are complete we can retire the 7703 * freeblocks dependency and any journal work awaiting completion. This 7704 * can not be called until all other dependencies are stable on disk. 7705 */ 7706 static int 7707 handle_complete_freeblocks(freeblks, flags) 7708 struct freeblks *freeblks; 7709 int flags; 7710 { 7711 struct inodedep *inodedep; 7712 struct inode *ip; 7713 struct vnode *vp; 7714 struct fs *fs; 7715 struct ufsmount *ump; 7716 ufs2_daddr_t spare; 7717 7718 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7719 fs = ump->um_fs; 7720 flags = LK_EXCLUSIVE | flags; 7721 spare = freeblks->fb_chkcnt; 7722 7723 /* 7724 * If we did not release the expected number of blocks we may have 7725 * to adjust the inode block count here. Only do so if it wasn't 7726 * a truncation to zero and the modrev still matches. 
7727 */ 7728 if (spare && freeblks->fb_len != 0) { 7729 if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum, 7730 flags, &vp, FFSV_FORCEINSMQ) != 0) 7731 return (EBUSY); 7732 ip = VTOI(vp); 7733 if (DIP(ip, i_modrev) == freeblks->fb_modrev) { 7734 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare); 7735 ip->i_flag |= IN_CHANGE; 7736 /* 7737 * We must wait so this happens before the 7738 * journal is reclaimed. 7739 */ 7740 ffs_update(vp, 1); 7741 } 7742 vput(vp); 7743 } 7744 if (spare < 0) { 7745 UFS_LOCK(ump); 7746 fs->fs_pendingblocks += spare; 7747 UFS_UNLOCK(ump); 7748 } 7749 #ifdef QUOTA 7750 /* Handle spare. */ 7751 if (spare) 7752 quotaadj(freeblks->fb_quota, ump, -spare); 7753 quotarele(freeblks->fb_quota); 7754 #endif 7755 ACQUIRE_LOCK(&lk); 7756 if (freeblks->fb_state & ONDEPLIST) { 7757 inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum, 7758 0, &inodedep); 7759 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); 7760 freeblks->fb_state &= ~ONDEPLIST; 7761 if (TAILQ_EMPTY(&inodedep->id_freeblklst)) 7762 free_inodedep(inodedep); 7763 } 7764 /* 7765 * All of the freeblock deps must be complete prior to this call 7766 * so it's now safe to complete earlier outstanding journal entries. 7767 */ 7768 handle_jwork(&freeblks->fb_jwork); 7769 WORKITEM_FREE(freeblks, D_FREEBLKS); 7770 FREE_LOCK(&lk); 7771 return (0); 7772 } 7773 7774 /* 7775 * Release blocks associated with the freeblks and stored in the indirect 7776 * block dbn. If level is greater than SINGLE, the block is an indirect block 7777 * and recursive calls to indirtrunc must be used to cleanse other indirect 7778 * blocks. 7779 * 7780 * This handles partial and complete truncation of blocks. Partial is noted 7781 * with goingaway == 0. In this case the freework is completed after the 7782 * zero'd indirects are written to disk. For full truncation the freework 7783 * is completed after the block is freed. 7784 */ 7785 static void 7786 indir_trunc(freework, dbn, lbn) 7787 struct freework *freework; 7788 ufs2_daddr_t dbn; 7789 ufs_lbn_t lbn; 7790 { 7791 struct freework *nfreework; 7792 struct workhead wkhd; 7793 struct freeblks *freeblks; 7794 struct buf *bp; 7795 struct fs *fs; 7796 struct indirdep *indirdep; 7797 struct ufsmount *ump; 7798 ufs1_daddr_t *bap1 = 0; 7799 ufs2_daddr_t nb, nnb, *bap2 = 0; 7800 ufs_lbn_t lbnadd, nlbn; 7801 int i, nblocks, ufs1fmt; 7802 int freedblocks; 7803 int goingaway; 7804 int freedeps; 7805 int needj; 7806 int level; 7807 int cnt; 7808 7809 freeblks = freework->fw_freeblks; 7810 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7811 fs = ump->um_fs; 7812 /* 7813 * Get buffer of block pointers to be freed. There are three cases: 7814 * 7815 * 1) Partial truncate caches the indirdep pointer in the freework 7816 * which provides us a back copy to the save bp which holds the 7817 * pointers we want to clear. When this completes the zero 7818 * pointers are written to the real copy. 7819 * 2) The indirect is being completely truncated, cancel_indirdep() 7820 * eliminated the real copy and placed the indirdep on the saved 7821 * copy. The indirdep and buf are discarded when this completes. 7822 * 3) The indirect was not in memory, we read a copy off of the disk 7823 * using the devvp and drop and invalidate the buffer when we're 7824 * done. 
7825 */ 7826 goingaway = 1; 7827 indirdep = NULL; 7828 if (freework->fw_indir != NULL) { 7829 goingaway = 0; 7830 indirdep = freework->fw_indir; 7831 bp = indirdep->ir_savebp; 7832 if (bp == NULL || bp->b_blkno != dbn) 7833 panic("indir_trunc: Bad saved buf %p blkno %jd", 7834 bp, (intmax_t)dbn); 7835 } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) { 7836 /* 7837 * The lock prevents the buf dep list from changing and 7838 * indirects on devvp should only ever have one dependency. 7839 */ 7840 indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep)); 7841 if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0) 7842 panic("indir_trunc: Bad indirdep %p from buf %p", 7843 indirdep, bp); 7844 } else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 7845 NOCRED, &bp) != 0) { 7846 brelse(bp); 7847 return; 7848 } 7849 ACQUIRE_LOCK(&lk); 7850 /* Protects against a race with complete_trunc_indir(). */ 7851 freework->fw_state &= ~INPROGRESS; 7852 /* 7853 * If we have an indirdep we need to enforce the truncation order 7854 * and discard it when it is complete. 7855 */ 7856 if (indirdep) { 7857 if (freework != TAILQ_FIRST(&indirdep->ir_trunc) && 7858 !TAILQ_EMPTY(&indirdep->ir_trunc)) { 7859 /* 7860 * Add the complete truncate to the list on the 7861 * indirdep to enforce in-order processing. 7862 */ 7863 if (freework->fw_indir == NULL) 7864 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, 7865 freework, fw_next); 7866 FREE_LOCK(&lk); 7867 return; 7868 } 7869 /* 7870 * If we're goingaway, free the indirdep. Otherwise it will 7871 * linger until the write completes. 7872 */ 7873 if (goingaway) { 7874 free_indirdep(indirdep); 7875 ump->um_numindirdeps -= 1; 7876 } 7877 } 7878 FREE_LOCK(&lk); 7879 /* Initialize pointers depending on block size. */ 7880 if (ump->um_fstype == UFS1) { 7881 bap1 = (ufs1_daddr_t *)bp->b_data; 7882 nb = bap1[freework->fw_off]; 7883 ufs1fmt = 1; 7884 } else { 7885 bap2 = (ufs2_daddr_t *)bp->b_data; 7886 nb = bap2[freework->fw_off]; 7887 ufs1fmt = 0; 7888 } 7889 level = lbn_level(lbn); 7890 needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0; 7891 lbnadd = lbn_offset(fs, level); 7892 nblocks = btodb(fs->fs_bsize); 7893 nfreework = freework; 7894 freedeps = 0; 7895 cnt = 0; 7896 /* 7897 * Reclaim blocks. Traverses into nested indirect levels and 7898 * arranges for the current level to be freed when subordinates 7899 * are free when journaling. 7900 */ 7901 for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) { 7902 if (i != NINDIR(fs) - 1) { 7903 if (ufs1fmt) 7904 nnb = bap1[i+1]; 7905 else 7906 nnb = bap2[i+1]; 7907 } else 7908 nnb = 0; 7909 if (nb == 0) 7910 continue; 7911 cnt++; 7912 if (level != 0) { 7913 nlbn = (lbn + 1) - (i * lbnadd); 7914 if (needj != 0) { 7915 nfreework = newfreework(ump, freeblks, freework, 7916 nlbn, nb, fs->fs_frag, 0, 0); 7917 freedeps++; 7918 } 7919 indir_trunc(nfreework, fsbtodb(fs, nb), nlbn); 7920 } else { 7921 struct freedep *freedep; 7922 7923 /* 7924 * Attempt to aggregate freedep dependencies for 7925 * all blocks being released to the same CG. 
7926 */ 7927 LIST_INIT(&wkhd); 7928 if (needj != 0 && 7929 (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) { 7930 freedep = newfreedep(freework); 7931 WORKLIST_INSERT_UNLOCKED(&wkhd, 7932 &freedep->fd_list); 7933 freedeps++; 7934 } 7935 CTR3(KTR_SUJ, 7936 "indir_trunc: ino %d blkno %jd size %ld", 7937 freeblks->fb_inum, nb, fs->fs_bsize); 7938 ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, 7939 fs->fs_bsize, freeblks->fb_inum, 7940 freeblks->fb_vtype, &wkhd); 7941 } 7942 } 7943 if (goingaway) { 7944 bp->b_flags |= B_INVAL | B_NOCACHE; 7945 brelse(bp); 7946 } 7947 freedblocks = 0; 7948 if (level == 0) 7949 freedblocks = (nblocks * cnt); 7950 if (needj == 0) 7951 freedblocks += nblocks; 7952 freeblks_free(ump, freeblks, freedblocks); 7953 /* 7954 * If we are journaling set up the ref counts and offset so this 7955 * indirect can be completed when its children are free. 7956 */ 7957 if (needj) { 7958 ACQUIRE_LOCK(&lk); 7959 freework->fw_off = i; 7960 freework->fw_ref += freedeps; 7961 freework->fw_ref -= NINDIR(fs) + 1; 7962 if (level == 0) 7963 freeblks->fb_cgwait += freedeps; 7964 if (freework->fw_ref == 0) 7965 freework_freeblock(freework); 7966 FREE_LOCK(&lk); 7967 return; 7968 } 7969 /* 7970 * If we're not journaling we can free the indirect now. 7971 */ 7972 dbn = dbtofsb(fs, dbn); 7973 CTR3(KTR_SUJ, 7974 "indir_trunc 2: ino %d blkno %jd size %ld", 7975 freeblks->fb_inum, dbn, fs->fs_bsize); 7976 ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize, 7977 freeblks->fb_inum, freeblks->fb_vtype, NULL); 7978 /* Non SUJ softdep does single-threaded truncations. */ 7979 if (freework->fw_blkno == dbn) { 7980 freework->fw_state |= ALLCOMPLETE; 7981 ACQUIRE_LOCK(&lk); 7982 handle_written_freework(freework); 7983 FREE_LOCK(&lk); 7984 } 7985 return; 7986 } 7987 7988 /* 7989 * Cancel an allocindir when it is removed via truncation. When bp is not 7990 * NULL the indirect never appeared on disk and is scheduled to be freed 7991 * independently of the indir so we can more easily track journal work. 7992 */ 7993 static void 7994 cancel_allocindir(aip, bp, freeblks, trunc) 7995 struct allocindir *aip; 7996 struct buf *bp; 7997 struct freeblks *freeblks; 7998 int trunc; 7999 { 8000 struct indirdep *indirdep; 8001 struct freefrag *freefrag; 8002 struct newblk *newblk; 8003 8004 newblk = (struct newblk *)aip; 8005 LIST_REMOVE(aip, ai_next); 8006 /* 8007 * We must eliminate the pointer in bp if it must be freed on its 8008 * own due to partial truncate or pending journal work. 8009 */ 8010 if (bp && (trunc || newblk->nb_jnewblk)) { 8011 /* 8012 * Clear the pointer and mark the aip to be freed 8013 * directly if it never existed on disk. 8014 */ 8015 aip->ai_state |= DELAYEDFREE; 8016 indirdep = aip->ai_indirdep; 8017 if (indirdep->ir_state & UFS1FMT) 8018 ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8019 else 8020 ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8021 } 8022 /* 8023 * When truncating the previous pointer will be freed via 8024 * savedbp. Eliminate the freefrag which would dup free. 8025 */ 8026 if (trunc && (freefrag = newblk->nb_freefrag) != NULL) { 8027 newblk->nb_freefrag = NULL; 8028 if (freefrag->ff_jdep) 8029 cancel_jfreefrag( 8030 WK_JFREEFRAG(freefrag->ff_jdep)); 8031 jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork); 8032 WORKITEM_FREE(freefrag, D_FREEFRAG); 8033 } 8034 /* 8035 * If the journal hasn't been written the jnewblk must be passed 8036 * to the call to ffs_blkfree that reclaims the space. 
We accomplish 8037 * this by leaving the journal dependency on the newblk to be freed 8038 * when a freework is created in handle_workitem_freeblocks(). 8039 */ 8040 cancel_newblk(newblk, NULL, &freeblks->fb_jwork); 8041 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 8042 } 8043 8044 /* 8045 * Create the mkdir dependencies for . and .. in a new directory. Link them 8046 * in to a newdirblk so any subsequent additions are tracked properly. The 8047 * caller is responsible for adding the mkdir1 dependency to the journal 8048 * and updating id_mkdiradd. This function returns with lk held. 8049 */ 8050 static struct mkdir * 8051 setup_newdir(dap, newinum, dinum, newdirbp, mkdirp) 8052 struct diradd *dap; 8053 ino_t newinum; 8054 ino_t dinum; 8055 struct buf *newdirbp; 8056 struct mkdir **mkdirp; 8057 { 8058 struct newblk *newblk; 8059 struct pagedep *pagedep; 8060 struct inodedep *inodedep; 8061 struct newdirblk *newdirblk = 0; 8062 struct mkdir *mkdir1, *mkdir2; 8063 struct worklist *wk; 8064 struct jaddref *jaddref; 8065 struct mount *mp; 8066 8067 mp = dap->da_list.wk_mp; 8068 newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, 8069 M_SOFTDEP_FLAGS); 8070 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8071 LIST_INIT(&newdirblk->db_mkdir); 8072 mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8073 workitem_alloc(&mkdir1->md_list, D_MKDIR, mp); 8074 mkdir1->md_state = ATTACHED | MKDIR_BODY; 8075 mkdir1->md_diradd = dap; 8076 mkdir1->md_jaddref = NULL; 8077 mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8078 workitem_alloc(&mkdir2->md_list, D_MKDIR, mp); 8079 mkdir2->md_state = ATTACHED | MKDIR_PARENT; 8080 mkdir2->md_diradd = dap; 8081 mkdir2->md_jaddref = NULL; 8082 if (MOUNTEDSUJ(mp) == 0) { 8083 mkdir1->md_state |= DEPCOMPLETE; 8084 mkdir2->md_state |= DEPCOMPLETE; 8085 } 8086 /* 8087 * Dependency on "." and ".." being written to disk. 8088 */ 8089 mkdir1->md_buf = newdirbp; 8090 ACQUIRE_LOCK(&lk); 8091 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 8092 /* 8093 * We must link the pagedep, allocdirect, and newdirblk for 8094 * the initial file page so the pointer to the new directory 8095 * is not written until the directory contents are live and 8096 * any subsequent additions are not marked live until the 8097 * block is reachable via the inode. 8098 */ 8099 if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0) 8100 panic("setup_newdir: lost pagedep"); 8101 LIST_FOREACH(wk, &newdirbp->b_dep, wk_list) 8102 if (wk->wk_type == D_ALLOCDIRECT) 8103 break; 8104 if (wk == NULL) 8105 panic("setup_newdir: lost allocdirect"); 8106 if (pagedep->pd_state & NEWBLOCK) 8107 panic("setup_newdir: NEWBLOCK already set"); 8108 newblk = WK_NEWBLK(wk); 8109 pagedep->pd_state |= NEWBLOCK; 8110 pagedep->pd_newdirblk = newdirblk; 8111 newdirblk->db_pagedep = pagedep; 8112 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8113 WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list); 8114 /* 8115 * Look up the inodedep for the parent directory so that we 8116 * can link mkdir2 into the pending dotdot jaddref or 8117 * the inode write if there is none. If the inode is 8118 * ALLCOMPLETE and no jaddref is present all dependencies have 8119 * been satisfied and mkdir2 can be freed. 
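	 *
	 * Concretely, the three outcomes below are: link mkdir2 to the
	 * pending dotdot jaddref when journaling; free mkdir2 outright when
	 * the parent is already ALLCOMPLETE; otherwise park it on the
	 * parent's id_bufwait.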
8120 */ 8121 inodedep_lookup(mp, dinum, 0, &inodedep); 8122 if (MOUNTEDSUJ(mp)) { 8123 if (inodedep == NULL) 8124 panic("setup_newdir: Lost parent."); 8125 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8126 inoreflst); 8127 KASSERT(jaddref != NULL && jaddref->ja_parent == newinum && 8128 (jaddref->ja_state & MKDIR_PARENT), 8129 ("setup_newdir: bad dotdot jaddref %p", jaddref)); 8130 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 8131 mkdir2->md_jaddref = jaddref; 8132 jaddref->ja_mkdir = mkdir2; 8133 } else if (inodedep == NULL || 8134 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 8135 dap->da_state &= ~MKDIR_PARENT; 8136 WORKITEM_FREE(mkdir2, D_MKDIR); 8137 } else { 8138 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 8139 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list); 8140 } 8141 *mkdirp = mkdir2; 8142 8143 return (mkdir1); 8144 } 8145 8146 /* 8147 * Directory entry addition dependencies. 8148 * 8149 * When adding a new directory entry, the inode (with its incremented link 8150 * count) must be written to disk before the directory entry's pointer to it. 8151 * Also, if the inode is newly allocated, the corresponding freemap must be 8152 * updated (on disk) before the directory entry's pointer. These requirements 8153 * are met via undo/redo on the directory entry's pointer, which consists 8154 * simply of the inode number. 8155 * 8156 * As directory entries are added and deleted, the free space within a 8157 * directory block can become fragmented. The ufs filesystem will compact 8158 * a fragmented directory block to make space for a new entry. When this 8159 * occurs, the offsets of previously added entries change. Any "diradd" 8160 * dependency structures corresponding to these entries must be updated with 8161 * the new offsets. 8162 */ 8163 8164 /* 8165 * This routine is called after the in-memory inode's link 8166 * count has been incremented, but before the directory entry's 8167 * pointer to the inode has been set. 8168 */ 8169 int 8170 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 8171 struct buf *bp; /* buffer containing directory block */ 8172 struct inode *dp; /* inode for directory */ 8173 off_t diroffset; /* offset of new entry in directory */ 8174 ino_t newinum; /* inode referenced by new directory entry */ 8175 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 8176 int isnewblk; /* entry is in a newly allocated block */ 8177 { 8178 int offset; /* offset of new entry within directory block */ 8179 ufs_lbn_t lbn; /* block in directory containing new entry */ 8180 struct fs *fs; 8181 struct diradd *dap; 8182 struct newblk *newblk; 8183 struct pagedep *pagedep; 8184 struct inodedep *inodedep; 8185 struct newdirblk *newdirblk = 0; 8186 struct mkdir *mkdir1, *mkdir2; 8187 struct jaddref *jaddref; 8188 struct mount *mp; 8189 int isindir; 8190 8191 /* 8192 * Whiteouts have no dependencies. 8193 */ 8194 if (newinum == WINO) { 8195 if (newdirbp != NULL) 8196 bdwrite(newdirbp); 8197 return (0); 8198 } 8199 jaddref = NULL; 8200 mkdir1 = mkdir2 = NULL; 8201 mp = UFSTOVFS(dp->i_ump); 8202 fs = dp->i_fs; 8203 lbn = lblkno(fs, diroffset); 8204 offset = blkoff(fs, diroffset); 8205 dap = malloc(sizeof(struct diradd), M_DIRADD, 8206 M_SOFTDEP_FLAGS|M_ZERO); 8207 workitem_alloc(&dap->da_list, D_DIRADD, mp); 8208 dap->da_offset = offset; 8209 dap->da_newinum = newinum; 8210 dap->da_state = ATTACHED; 8211 LIST_INIT(&dap->da_jwork); 8212 isindir = bp->b_lblkno >= NDADDR; 8213 if (isnewblk && 8214 (isindir ? 
blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) { 8215 newdirblk = malloc(sizeof(struct newdirblk), 8216 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 8217 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8218 LIST_INIT(&newdirblk->db_mkdir); 8219 } 8220 /* 8221 * If we're creating a new directory setup the dependencies and set 8222 * the dap state to wait for them. Otherwise it's COMPLETE and 8223 * we can move on. 8224 */ 8225 if (newdirbp == NULL) { 8226 dap->da_state |= DEPCOMPLETE; 8227 ACQUIRE_LOCK(&lk); 8228 } else { 8229 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 8230 mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp, 8231 &mkdir2); 8232 } 8233 /* 8234 * Link into parent directory pagedep to await its being written. 8235 */ 8236 pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep); 8237 #ifdef DEBUG 8238 if (diradd_lookup(pagedep, offset) != NULL) 8239 panic("softdep_setup_directory_add: %p already at off %d\n", 8240 diradd_lookup(pagedep, offset), offset); 8241 #endif 8242 dap->da_pagedep = pagedep; 8243 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 8244 da_pdlist); 8245 inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep); 8246 /* 8247 * If we're journaling, link the diradd into the jaddref so it 8248 * may be completed after the journal entry is written. Otherwise, 8249 * link the diradd into its inodedep. If the inode is not yet 8250 * written place it on the bufwait list, otherwise do the post-inode 8251 * write processing to put it on the id_pendinghd list. 8252 */ 8253 if (MOUNTEDSUJ(mp)) { 8254 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8255 inoreflst); 8256 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 8257 ("softdep_setup_directory_add: bad jaddref %p", jaddref)); 8258 jaddref->ja_diroff = diroffset; 8259 jaddref->ja_diradd = dap; 8260 add_to_journal(&jaddref->ja_list); 8261 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 8262 diradd_inode_written(dap, inodedep); 8263 else 8264 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 8265 /* 8266 * Add the journal entries for . and .. links now that the primary 8267 * link is written. 8268 */ 8269 if (mkdir1 != NULL && MOUNTEDSUJ(mp)) { 8270 jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 8271 inoreflst, if_deps); 8272 KASSERT(jaddref != NULL && 8273 jaddref->ja_ino == jaddref->ja_parent && 8274 (jaddref->ja_state & MKDIR_BODY), 8275 ("softdep_setup_directory_add: bad dot jaddref %p", 8276 jaddref)); 8277 mkdir1->md_jaddref = jaddref; 8278 jaddref->ja_mkdir = mkdir1; 8279 /* 8280 * It is important that the dotdot journal entry 8281 * is added prior to the dot entry since dot writes 8282 * both the dot and dotdot links. These both must 8283 * be added after the primary link for the journal 8284 * to remain consistent. 8285 */ 8286 add_to_journal(&mkdir2->md_jaddref->ja_list); 8287 add_to_journal(&jaddref->ja_list); 8288 } 8289 /* 8290 * If we are adding a new directory remember this diradd so that if 8291 * we rename it we can keep the dot and dotdot dependencies. If 8292 * we are adding a new name for an inode that has a mkdiradd we 8293 * must be in rename and we have to move the dot and dotdot 8294 * dependencies to this new name. The old name is being orphaned 8295 * soon. 
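	 * (The transfer of those dependencies is handled by merge_diradd()
	 * below.)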
8296 */ 8297 if (mkdir1 != NULL) { 8298 if (inodedep->id_mkdiradd != NULL) 8299 panic("softdep_setup_directory_add: Existing mkdir"); 8300 inodedep->id_mkdiradd = dap; 8301 } else if (inodedep->id_mkdiradd) 8302 merge_diradd(inodedep, dap); 8303 if (newdirblk) { 8304 /* 8305 * There is nothing to do if we are already tracking 8306 * this block. 8307 */ 8308 if ((pagedep->pd_state & NEWBLOCK) != 0) { 8309 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 8310 FREE_LOCK(&lk); 8311 return (0); 8312 } 8313 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk) 8314 == 0) 8315 panic("softdep_setup_directory_add: lost entry"); 8316 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8317 pagedep->pd_state |= NEWBLOCK; 8318 pagedep->pd_newdirblk = newdirblk; 8319 newdirblk->db_pagedep = pagedep; 8320 FREE_LOCK(&lk); 8321 /* 8322 * If we extended into an indirect signal direnter to sync. 8323 */ 8324 if (isindir) 8325 return (1); 8326 return (0); 8327 } 8328 FREE_LOCK(&lk); 8329 return (0); 8330 } 8331 8332 /* 8333 * This procedure is called to change the offset of a directory 8334 * entry when compacting a directory block which must be owned 8335 * exclusively by the caller. Note that the actual entry movement 8336 * must be done in this procedure to ensure that no I/O completions 8337 * occur while the move is in progress. 8338 */ 8339 void 8340 softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) 8341 struct buf *bp; /* Buffer holding directory block. */ 8342 struct inode *dp; /* inode for directory */ 8343 caddr_t base; /* address of dp->i_offset */ 8344 caddr_t oldloc; /* address of old directory location */ 8345 caddr_t newloc; /* address of new directory location */ 8346 int entrysize; /* size of directory entry */ 8347 { 8348 int offset, oldoffset, newoffset; 8349 struct pagedep *pagedep; 8350 struct jmvref *jmvref; 8351 struct diradd *dap; 8352 struct direct *de; 8353 struct mount *mp; 8354 ufs_lbn_t lbn; 8355 int flags; 8356 8357 mp = UFSTOVFS(dp->i_ump); 8358 de = (struct direct *)oldloc; 8359 jmvref = NULL; 8360 flags = 0; 8361 /* 8362 * Moves are always journaled as it would be too complex to 8363 * determine if any affected adds or removes are present in the 8364 * journal. 8365 */ 8366 if (MOUNTEDSUJ(mp)) { 8367 flags = DEPALLOC; 8368 jmvref = newjmvref(dp, de->d_ino, 8369 dp->i_offset + (oldloc - base), 8370 dp->i_offset + (newloc - base)); 8371 } 8372 lbn = lblkno(dp->i_fs, dp->i_offset); 8373 offset = blkoff(dp->i_fs, dp->i_offset); 8374 oldoffset = offset + (oldloc - base); 8375 newoffset = offset + (newloc - base); 8376 ACQUIRE_LOCK(&lk); 8377 if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0) 8378 goto done; 8379 dap = diradd_lookup(pagedep, oldoffset); 8380 if (dap) { 8381 dap->da_offset = newoffset; 8382 newoffset = DIRADDHASH(newoffset); 8383 oldoffset = DIRADDHASH(oldoffset); 8384 if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE && 8385 newoffset != oldoffset) { 8386 LIST_REMOVE(dap, da_pdlist); 8387 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset], 8388 dap, da_pdlist); 8389 } 8390 } 8391 done: 8392 if (jmvref) { 8393 jmvref->jm_pagedep = pagedep; 8394 LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps); 8395 add_to_journal(&jmvref->jm_list); 8396 } 8397 bcopy(oldloc, newloc, entrysize); 8398 FREE_LOCK(&lk); 8399 } 8400 8401 /* 8402 * Move the mkdir dependencies and journal work from one diradd to another 8403 * when renaming a directory. The new name must depend on the mkdir deps 8404 * completing as the old name did. 
Directories can only have one valid link
8405  * at a time so one must be canonical.
8406  */
8407 static void
8408 merge_diradd(inodedep, newdap)
8409 	struct inodedep *inodedep;
8410 	struct diradd *newdap;
8411 {
8412 	struct diradd *olddap;
8413 	struct mkdir *mkdir, *nextmd;
8414 	short state;
8415 
8416 	olddap = inodedep->id_mkdiradd;
8417 	inodedep->id_mkdiradd = newdap;
8418 	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8419 		newdap->da_state &= ~DEPCOMPLETE;
8420 		for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
8421 			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8422 			if (mkdir->md_diradd != olddap)
8423 				continue;
8424 			mkdir->md_diradd = newdap;
8425 			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8426 			newdap->da_state |= state;
8427 			olddap->da_state &= ~state;
8428 			if ((olddap->da_state &
8429 			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
8430 				break;
8431 		}
8432 		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8433 			panic("merge_diradd: unfound ref");
8434 	}
8435 	/*
8436 	 * Any mkdir related journal items are not safe to be freed until
8437 	 * the new name is stable.
8438 	 */
8439 	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8440 	olddap->da_state |= DEPCOMPLETE;
8441 	complete_diradd(olddap);
8442 }
8443 
8444 /*
8445  * Move the diradd to the pending list when all diradd dependencies are
8446  * complete.
8447  */
8448 static void
8449 complete_diradd(dap)
8450 	struct diradd *dap;
8451 {
8452 	struct pagedep *pagedep;
8453 
8454 	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8455 		if (dap->da_state & DIRCHG)
8456 			pagedep = dap->da_previous->dm_pagedep;
8457 		else
8458 			pagedep = dap->da_pagedep;
8459 		LIST_REMOVE(dap, da_pdlist);
8460 		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8461 	}
8462 }
8463 
8464 /*
8465  * Cancel a diradd when a dirrem overlaps with it. We must cancel the journal
8466  * add entries and conditionally journal the remove.
8467  */
8468 static void
8469 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8470 	struct diradd *dap;
8471 	struct dirrem *dirrem;
8472 	struct jremref *jremref;
8473 	struct jremref *dotremref;
8474 	struct jremref *dotdotremref;
8475 {
8476 	struct inodedep *inodedep;
8477 	struct jaddref *jaddref;
8478 	struct inoref *inoref;
8479 	struct mkdir *mkdir;
8480 
8481 	/*
8482 	 * If no remove references were allocated we're on a non-journaled
8483 	 * filesystem and can skip the cancel step.
8484 	 */
8485 	if (jremref == NULL) {
8486 		free_diradd(dap, NULL);
8487 		return;
8488 	}
8489 	/*
8490 	 * Cancel the primary name and free it if it does not require
8491 	 * journaling.
8492 	 */
8493 	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8494 	    0, &inodedep) != 0) {
8495 		/* Abort the addref that references this diradd. */
8496 		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8497 			if (inoref->if_list.wk_type != D_JADDREF)
8498 				continue;
8499 			jaddref = (struct jaddref *)inoref;
8500 			if (jaddref->ja_diradd != dap)
8501 				continue;
8502 			if (cancel_jaddref(jaddref, inodedep,
8503 			    &dirrem->dm_jwork) == 0) {
8504 				free_jremref(jremref);
8505 				jremref = NULL;
8506 			}
8507 			break;
8508 		}
8509 	}
8510 	/*
8511 	 * Cancel subordinate names and free them if they do not require
8512 	 * journaling.
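	 * MKDIR_PARENT entries pair with the saved dotdot remove reference
	 * (dotdotremref) and MKDIR_BODY entries with the dot reference
	 * (dotremref); a journal add that is successfully canceled makes
	 * the matching remove record unnecessary.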
8513 */ 8514 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8515 LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) { 8516 if (mkdir->md_diradd != dap) 8517 continue; 8518 if ((jaddref = mkdir->md_jaddref) == NULL) 8519 continue; 8520 mkdir->md_jaddref = NULL; 8521 if (mkdir->md_state & MKDIR_PARENT) { 8522 if (cancel_jaddref(jaddref, NULL, 8523 &dirrem->dm_jwork) == 0) { 8524 free_jremref(dotdotremref); 8525 dotdotremref = NULL; 8526 } 8527 } else { 8528 if (cancel_jaddref(jaddref, inodedep, 8529 &dirrem->dm_jwork) == 0) { 8530 free_jremref(dotremref); 8531 dotremref = NULL; 8532 } 8533 } 8534 } 8535 } 8536 8537 if (jremref) 8538 journal_jremref(dirrem, jremref, inodedep); 8539 if (dotremref) 8540 journal_jremref(dirrem, dotremref, inodedep); 8541 if (dotdotremref) 8542 journal_jremref(dirrem, dotdotremref, NULL); 8543 jwork_move(&dirrem->dm_jwork, &dap->da_jwork); 8544 free_diradd(dap, &dirrem->dm_jwork); 8545 } 8546 8547 /* 8548 * Free a diradd dependency structure. This routine must be called 8549 * with splbio interrupts blocked. 8550 */ 8551 static void 8552 free_diradd(dap, wkhd) 8553 struct diradd *dap; 8554 struct workhead *wkhd; 8555 { 8556 struct dirrem *dirrem; 8557 struct pagedep *pagedep; 8558 struct inodedep *inodedep; 8559 struct mkdir *mkdir, *nextmd; 8560 8561 mtx_assert(&lk, MA_OWNED); 8562 LIST_REMOVE(dap, da_pdlist); 8563 if (dap->da_state & ONWORKLIST) 8564 WORKLIST_REMOVE(&dap->da_list); 8565 if ((dap->da_state & DIRCHG) == 0) { 8566 pagedep = dap->da_pagedep; 8567 } else { 8568 dirrem = dap->da_previous; 8569 pagedep = dirrem->dm_pagedep; 8570 dirrem->dm_dirinum = pagedep->pd_ino; 8571 dirrem->dm_state |= COMPLETE; 8572 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 8573 add_to_worklist(&dirrem->dm_list, 0); 8574 } 8575 if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum, 8576 0, &inodedep) != 0) 8577 if (inodedep->id_mkdiradd == dap) 8578 inodedep->id_mkdiradd = NULL; 8579 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8580 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 8581 nextmd = LIST_NEXT(mkdir, md_mkdirs); 8582 if (mkdir->md_diradd != dap) 8583 continue; 8584 dap->da_state &= 8585 ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 8586 LIST_REMOVE(mkdir, md_mkdirs); 8587 if (mkdir->md_state & ONWORKLIST) 8588 WORKLIST_REMOVE(&mkdir->md_list); 8589 if (mkdir->md_jaddref != NULL) 8590 panic("free_diradd: Unexpected jaddref"); 8591 WORKITEM_FREE(mkdir, D_MKDIR); 8592 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 8593 break; 8594 } 8595 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) 8596 panic("free_diradd: unfound ref"); 8597 } 8598 if (inodedep) 8599 free_inodedep(inodedep); 8600 /* 8601 * Free any journal segments waiting for the directory write. 8602 */ 8603 handle_jwork(&dap->da_jwork); 8604 WORKITEM_FREE(dap, D_DIRADD); 8605 } 8606 8607 /* 8608 * Directory entry removal dependencies. 8609 * 8610 * When removing a directory entry, the entry's inode pointer must be 8611 * zero'ed on disk before the corresponding inode's link count is decremented 8612 * (possibly freeing the inode for re-use). This dependency is handled by 8613 * updating the directory entry but delaying the inode count reduction until 8614 * after the directory block has been written to disk. After this point, the 8615 * inode count can be decremented whenever it is convenient. 8616 */ 8617 8618 /* 8619 * This routine should be called immediately after removing 8620 * a directory entry. 
The inode's link count should not be 8621 * decremented by the calling procedure -- the soft updates 8622 * code will do this task when it is safe. 8623 */ 8624 void 8625 softdep_setup_remove(bp, dp, ip, isrmdir) 8626 struct buf *bp; /* buffer containing directory block */ 8627 struct inode *dp; /* inode for the directory being modified */ 8628 struct inode *ip; /* inode for directory entry being removed */ 8629 int isrmdir; /* indicates if doing RMDIR */ 8630 { 8631 struct dirrem *dirrem, *prevdirrem; 8632 struct inodedep *inodedep; 8633 int direct; 8634 8635 /* 8636 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. We want 8637 * newdirrem() to setup the full directory remove which requires 8638 * isrmdir > 1. 8639 */ 8640 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 8641 /* 8642 * Add the dirrem to the inodedep's pending remove list for quick 8643 * discovery later. 8644 */ 8645 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, 8646 &inodedep) == 0) 8647 panic("softdep_setup_remove: Lost inodedep."); 8648 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 8649 dirrem->dm_state |= ONDEPLIST; 8650 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); 8651 8652 /* 8653 * If the COMPLETE flag is clear, then there were no active 8654 * entries and we want to roll back to a zeroed entry until 8655 * the new inode is committed to disk. If the COMPLETE flag is 8656 * set then we have deleted an entry that never made it to 8657 * disk. If the entry we deleted resulted from a name change, 8658 * then the old name still resides on disk. We cannot delete 8659 * its inode (returned to us in prevdirrem) until the zeroed 8660 * directory entry gets to disk. The new inode has never been 8661 * referenced on the disk, so can be deleted immediately. 8662 */ 8663 if ((dirrem->dm_state & COMPLETE) == 0) { 8664 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 8665 dm_next); 8666 FREE_LOCK(&lk); 8667 } else { 8668 if (prevdirrem != NULL) 8669 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 8670 prevdirrem, dm_next); 8671 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 8672 direct = LIST_EMPTY(&dirrem->dm_jremrefhd); 8673 FREE_LOCK(&lk); 8674 if (direct) 8675 handle_workitem_remove(dirrem, 0); 8676 } 8677 } 8678 8679 /* 8680 * Check for an entry matching 'offset' on both the pd_dirraddhd list and the 8681 * pd_pendinghd list of a pagedep. 8682 */ 8683 static struct diradd * 8684 diradd_lookup(pagedep, offset) 8685 struct pagedep *pagedep; 8686 int offset; 8687 { 8688 struct diradd *dap; 8689 8690 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 8691 if (dap->da_offset == offset) 8692 return (dap); 8693 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 8694 if (dap->da_offset == offset) 8695 return (dap); 8696 return (NULL); 8697 } 8698 8699 /* 8700 * Search for a .. diradd dependency in a directory that is being removed. 8701 * If the directory was renamed to a new parent we have a diradd rather 8702 * than a mkdir for the .. entry. We need to cancel it now before 8703 * it is found in truncate(). 
8704 */ 8705 static struct jremref * 8706 cancel_diradd_dotdot(ip, dirrem, jremref) 8707 struct inode *ip; 8708 struct dirrem *dirrem; 8709 struct jremref *jremref; 8710 { 8711 struct pagedep *pagedep; 8712 struct diradd *dap; 8713 struct worklist *wk; 8714 8715 if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0, 8716 &pagedep) == 0) 8717 return (jremref); 8718 dap = diradd_lookup(pagedep, DOTDOT_OFFSET); 8719 if (dap == NULL) 8720 return (jremref); 8721 cancel_diradd(dap, dirrem, jremref, NULL, NULL); 8722 /* 8723 * Mark any journal work as belonging to the parent so it is freed 8724 * with the .. reference. 8725 */ 8726 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) 8727 wk->wk_state |= MKDIR_PARENT; 8728 return (NULL); 8729 } 8730 8731 /* 8732 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to 8733 * replace it with a dirrem/diradd pair as a result of re-parenting a 8734 * directory. This ensures that we don't simultaneously have a mkdir and 8735 * a diradd for the same .. entry. 8736 */ 8737 static struct jremref * 8738 cancel_mkdir_dotdot(ip, dirrem, jremref) 8739 struct inode *ip; 8740 struct dirrem *dirrem; 8741 struct jremref *jremref; 8742 { 8743 struct inodedep *inodedep; 8744 struct jaddref *jaddref; 8745 struct mkdir *mkdir; 8746 struct diradd *dap; 8747 8748 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, 8749 &inodedep) == 0) 8750 return (jremref); 8751 dap = inodedep->id_mkdiradd; 8752 if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0) 8753 return (jremref); 8754 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; 8755 mkdir = LIST_NEXT(mkdir, md_mkdirs)) 8756 if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT) 8757 break; 8758 if (mkdir == NULL) 8759 panic("cancel_mkdir_dotdot: Unable to find mkdir\n"); 8760 if ((jaddref = mkdir->md_jaddref) != NULL) { 8761 mkdir->md_jaddref = NULL; 8762 jaddref->ja_state &= ~MKDIR_PARENT; 8763 if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0, 8764 &inodedep) == 0) 8765 panic("cancel_mkdir_dotdot: Lost parent inodedep"); 8766 if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) { 8767 journal_jremref(dirrem, jremref, inodedep); 8768 jremref = NULL; 8769 } 8770 } 8771 if (mkdir->md_state & ONWORKLIST) 8772 WORKLIST_REMOVE(&mkdir->md_list); 8773 mkdir->md_state |= ALLCOMPLETE; 8774 complete_mkdir(mkdir); 8775 return (jremref); 8776 } 8777 8778 static void 8779 journal_jremref(dirrem, jremref, inodedep) 8780 struct dirrem *dirrem; 8781 struct jremref *jremref; 8782 struct inodedep *inodedep; 8783 { 8784 8785 if (inodedep == NULL) 8786 if (inodedep_lookup(jremref->jr_list.wk_mp, 8787 jremref->jr_ref.if_ino, 0, &inodedep) == 0) 8788 panic("journal_jremref: Lost inodedep"); 8789 LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps); 8790 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); 8791 add_to_journal(&jremref->jr_list); 8792 } 8793 8794 static void 8795 dirrem_journal(dirrem, jremref, dotremref, dotdotremref) 8796 struct dirrem *dirrem; 8797 struct jremref *jremref; 8798 struct jremref *dotremref; 8799 struct jremref *dotdotremref; 8800 { 8801 struct inodedep *inodedep; 8802 8803 8804 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, 8805 &inodedep) == 0) 8806 panic("dirrem_journal: Lost inodedep"); 8807 journal_jremref(dirrem, jremref, inodedep); 8808 if (dotremref) 8809 journal_jremref(dirrem, dotremref, inodedep); 8810 if (dotdotremref) 8811 journal_jremref(dirrem, dotdotremref, NULL); 8812 } 8813 8814 /* 8815 * Allocate a 
new dirrem if appropriate and return it along with 8816 * its associated pagedep. Called without a lock, returns with lock. 8817 */ 8818 static struct dirrem * 8819 newdirrem(bp, dp, ip, isrmdir, prevdirremp) 8820 struct buf *bp; /* buffer containing directory block */ 8821 struct inode *dp; /* inode for the directory being modified */ 8822 struct inode *ip; /* inode for directory entry being removed */ 8823 int isrmdir; /* indicates if doing RMDIR */ 8824 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 8825 { 8826 int offset; 8827 ufs_lbn_t lbn; 8828 struct diradd *dap; 8829 struct dirrem *dirrem; 8830 struct pagedep *pagedep; 8831 struct jremref *jremref; 8832 struct jremref *dotremref; 8833 struct jremref *dotdotremref; 8834 struct vnode *dvp; 8835 8836 /* 8837 * Whiteouts have no deletion dependencies. 8838 */ 8839 if (ip == NULL) 8840 panic("newdirrem: whiteout"); 8841 dvp = ITOV(dp); 8842 /* 8843 * If we are over our limit, try to improve the situation. 8844 * Limiting the number of dirrem structures will also limit 8845 * the number of freefile and freeblks structures. 8846 */ 8847 ACQUIRE_LOCK(&lk); 8848 if (!IS_SNAPSHOT(ip) && dep_current[D_DIRREM] > max_softdeps / 2) 8849 (void) request_cleanup(ITOV(dp)->v_mount, FLUSH_BLOCKS); 8850 FREE_LOCK(&lk); 8851 dirrem = malloc(sizeof(struct dirrem), 8852 M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO); 8853 workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount); 8854 LIST_INIT(&dirrem->dm_jremrefhd); 8855 LIST_INIT(&dirrem->dm_jwork); 8856 dirrem->dm_state = isrmdir ? RMDIR : 0; 8857 dirrem->dm_oldinum = ip->i_number; 8858 *prevdirremp = NULL; 8859 /* 8860 * Allocate remove reference structures to track journal write 8861 * dependencies. We will always have one for the link and 8862 * when doing directories we will always have one more for dot. 8863 * When renaming a directory we skip the dotdot link change so 8864 * this is not needed. 8865 */ 8866 jremref = dotremref = dotdotremref = NULL; 8867 if (DOINGSUJ(dvp)) { 8868 if (isrmdir) { 8869 jremref = newjremref(dirrem, dp, ip, dp->i_offset, 8870 ip->i_effnlink + 2); 8871 dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET, 8872 ip->i_effnlink + 1); 8873 dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET, 8874 dp->i_effnlink + 1); 8875 dotdotremref->jr_state |= MKDIR_PARENT; 8876 } else 8877 jremref = newjremref(dirrem, dp, ip, dp->i_offset, 8878 ip->i_effnlink + 1); 8879 } 8880 ACQUIRE_LOCK(&lk); 8881 lbn = lblkno(dp->i_fs, dp->i_offset); 8882 offset = blkoff(dp->i_fs, dp->i_offset); 8883 pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC, 8884 &pagedep); 8885 dirrem->dm_pagedep = pagedep; 8886 dirrem->dm_offset = offset; 8887 /* 8888 * If we're renaming a .. link to a new directory, cancel any 8889 * existing MKDIR_PARENT mkdir. If it has already been canceled 8890 * the jremref is preserved for any potential diradd in this 8891 * location. This can not coincide with a rmdir. 8892 */ 8893 if (dp->i_offset == DOTDOT_OFFSET) { 8894 if (isrmdir) 8895 panic("newdirrem: .. directory change during remove?"); 8896 jremref = cancel_mkdir_dotdot(dp, dirrem, jremref); 8897 } 8898 /* 8899 * If we're removing a directory search for the .. dependency now and 8900 * cancel it. Any pending journal work will be added to the dirrem 8901 * to be completed when the workitem remove completes. 8902 */ 8903 if (isrmdir) 8904 dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref); 8905 /* 8906 * Check for a diradd dependency for the same directory entry. 
8907 * If present, then both dependencies become obsolete and can 8908 * be de-allocated. 8909 */ 8910 dap = diradd_lookup(pagedep, offset); 8911 if (dap == NULL) { 8912 /* 8913 * Link the jremref structures into the dirrem so they are 8914 * written prior to the pagedep. 8915 */ 8916 if (jremref) 8917 dirrem_journal(dirrem, jremref, dotremref, 8918 dotdotremref); 8919 return (dirrem); 8920 } 8921 /* 8922 * Must be ATTACHED at this point. 8923 */ 8924 if ((dap->da_state & ATTACHED) == 0) 8925 panic("newdirrem: not ATTACHED"); 8926 if (dap->da_newinum != ip->i_number) 8927 panic("newdirrem: inum %ju should be %ju", 8928 (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum); 8929 /* 8930 * If we are deleting a changed name that never made it to disk, 8931 * then return the dirrem describing the previous inode (which 8932 * represents the inode currently referenced from this entry on disk). 8933 */ 8934 if ((dap->da_state & DIRCHG) != 0) { 8935 *prevdirremp = dap->da_previous; 8936 dap->da_state &= ~DIRCHG; 8937 dap->da_pagedep = pagedep; 8938 } 8939 /* 8940 * We are deleting an entry that never made it to disk. 8941 * Mark it COMPLETE so we can delete its inode immediately. 8942 */ 8943 dirrem->dm_state |= COMPLETE; 8944 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref); 8945 #ifdef SUJ_DEBUG 8946 if (isrmdir == 0) { 8947 struct worklist *wk; 8948 8949 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) 8950 if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT)) 8951 panic("bad wk %p (0x%X)\n", wk, wk->wk_state); 8952 } 8953 #endif 8954 8955 return (dirrem); 8956 } 8957 8958 /* 8959 * Directory entry change dependencies. 8960 * 8961 * Changing an existing directory entry requires that an add operation 8962 * be completed first followed by a deletion. The semantics for the addition 8963 * are identical to the description of adding a new entry above except 8964 * that the rollback is to the old inode number rather than zero. Once 8965 * the addition dependency is completed, the removal is done as described 8966 * in the removal routine above. 8967 */ 8968 8969 /* 8970 * This routine should be called immediately after changing 8971 * a directory entry. The inode's link count should not be 8972 * decremented by the calling procedure -- the soft updates 8973 * code will perform this task when it is safe. 8974 */ 8975 void 8976 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 8977 struct buf *bp; /* buffer containing directory block */ 8978 struct inode *dp; /* inode for the directory being modified */ 8979 struct inode *ip; /* inode for directory entry being removed */ 8980 ino_t newinum; /* new inode number for changed entry */ 8981 int isrmdir; /* indicates if doing RMDIR */ 8982 { 8983 int offset; 8984 struct diradd *dap = NULL; 8985 struct dirrem *dirrem, *prevdirrem; 8986 struct pagedep *pagedep; 8987 struct inodedep *inodedep; 8988 struct jaddref *jaddref; 8989 struct mount *mp; 8990 8991 offset = blkoff(dp->i_fs, dp->i_offset); 8992 mp = UFSTOVFS(dp->i_ump); 8993 8994 /* 8995 * Whiteouts do not need diradd dependencies. 8996 */ 8997 if (newinum != WINO) { 8998 dap = malloc(sizeof(struct diradd), 8999 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); 9000 workitem_alloc(&dap->da_list, D_DIRADD, mp); 9001 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 9002 dap->da_offset = offset; 9003 dap->da_newinum = newinum; 9004 LIST_INIT(&dap->da_jwork); 9005 } 9006 9007 /* 9008 * Allocate a new dirrem and ACQUIRE_LOCK. 
9009 */ 9010 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 9011 pagedep = dirrem->dm_pagedep; 9012 /* 9013 * The possible values for isrmdir: 9014 * 0 - non-directory file rename 9015 * 1 - directory rename within same directory 9016 * inum - directory rename to new directory of given inode number 9017 * When renaming to a new directory, we are both deleting and 9018 * creating a new directory entry, so the link count on the new 9019 * directory should not change. Thus we do not need the followup 9020 * dirrem which is usually done in handle_workitem_remove. We set 9021 * the DIRCHG flag to tell handle_workitem_remove to skip the 9022 * followup dirrem. 9023 */ 9024 if (isrmdir > 1) 9025 dirrem->dm_state |= DIRCHG; 9026 9027 /* 9028 * Whiteouts have no additional dependencies, 9029 * so just put the dirrem on the correct list. 9030 */ 9031 if (newinum == WINO) { 9032 if ((dirrem->dm_state & COMPLETE) == 0) { 9033 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 9034 dm_next); 9035 } else { 9036 dirrem->dm_dirinum = pagedep->pd_ino; 9037 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 9038 add_to_worklist(&dirrem->dm_list, 0); 9039 } 9040 FREE_LOCK(&lk); 9041 return; 9042 } 9043 /* 9044 * Add the dirrem to the inodedep's pending remove list for quick 9045 * discovery later. A valid nlinkdelta ensures that this lookup 9046 * will not fail. 9047 */ 9048 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) 9049 panic("softdep_setup_directory_change: Lost inodedep."); 9050 dirrem->dm_state |= ONDEPLIST; 9051 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); 9052 9053 /* 9054 * If the COMPLETE flag is clear, then there were no active 9055 * entries and we want to roll back to the previous inode until 9056 * the new inode is committed to disk. If the COMPLETE flag is 9057 * set, then we have deleted an entry that never made it to disk. 9058 * If the entry we deleted resulted from a name change, then the old 9059 * inode reference still resides on disk. Any rollback that we do 9060 * needs to be to that old inode (returned to us in prevdirrem). If 9061 * the entry we deleted resulted from a create, then there is 9062 * no entry on the disk, so we want to roll back to zero rather 9063 * than the uncommitted inode. In either of the COMPLETE cases we 9064 * want to immediately free the unwritten and unreferenced inode. 9065 */ 9066 if ((dirrem->dm_state & COMPLETE) == 0) { 9067 dap->da_previous = dirrem; 9068 } else { 9069 if (prevdirrem != NULL) { 9070 dap->da_previous = prevdirrem; 9071 } else { 9072 dap->da_state &= ~DIRCHG; 9073 dap->da_pagedep = pagedep; 9074 } 9075 dirrem->dm_dirinum = pagedep->pd_ino; 9076 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 9077 add_to_worklist(&dirrem->dm_list, 0); 9078 } 9079 /* 9080 * Lookup the jaddref for this journal entry. We must finish 9081 * initializing it and make the diradd write dependent on it. 9082 * If we're not journaling, put it on the id_bufwait list if the 9083 * inode is not yet written. If it is written, do the post-inode 9084 * write processing to put it on the id_pendinghd list. 
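 *
 * Condensed, the three cases below are (a sketch of the code that
 * follows, not additional logic):
 *
 *	if (MOUNTEDSUJ(mp))
 *		tie dap to the tail jaddref and add_to_journal();
 *	else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
 *		mark dap COMPLETE and place it on pd_pendinghd;
 *	else
 *		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);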
9085 */ 9086 inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep); 9087 if (MOUNTEDSUJ(mp)) { 9088 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 9089 inoreflst); 9090 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 9091 ("softdep_setup_directory_change: bad jaddref %p", 9092 jaddref)); 9093 jaddref->ja_diroff = dp->i_offset; 9094 jaddref->ja_diradd = dap; 9095 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 9096 dap, da_pdlist); 9097 add_to_journal(&jaddref->ja_list); 9098 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 9099 dap->da_state |= COMPLETE; 9100 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 9101 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 9102 } else { 9103 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 9104 dap, da_pdlist); 9105 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 9106 } 9107 /* 9108 * If we're making a new name for a directory that has not been 9109 * committed, we need to move the dot and dotdot references to 9110 * this new name. 9111 */ 9112 if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET) 9113 merge_diradd(inodedep, dap); 9114 FREE_LOCK(&lk); 9115 } 9116 9117 /* 9118 * Called whenever the link count on an inode is changed. 9119 * It creates an inode dependency so that the new reference(s) 9120 * to the inode cannot be committed to disk until the updated 9121 * inode has been written. 9122 */ 9123 void 9124 softdep_change_linkcnt(ip) 9125 struct inode *ip; /* the inode with the increased link count */ 9126 { 9127 struct inodedep *inodedep; 9128 int dflags; 9129 9130 ACQUIRE_LOCK(&lk); 9131 dflags = DEPALLOC; 9132 if (IS_SNAPSHOT(ip)) 9133 dflags |= NODELAY; 9134 inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep); 9135 if (ip->i_nlink < ip->i_effnlink) 9136 panic("softdep_change_linkcnt: bad delta"); 9137 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 9138 FREE_LOCK(&lk); 9139 } 9140 9141 /* 9142 * Attach a sbdep dependency to the superblock buf so that we can keep 9143 * track of the head of the linked list of referenced but unlinked inodes. 9144 */ 9145 void 9146 softdep_setup_sbupdate(ump, fs, bp) 9147 struct ufsmount *ump; 9148 struct fs *fs; 9149 struct buf *bp; 9150 { 9151 struct sbdep *sbdep; 9152 struct worklist *wk; 9153 9154 if (MOUNTEDSUJ(UFSTOVFS(ump)) == 0) 9155 return; 9156 LIST_FOREACH(wk, &bp->b_dep, wk_list) 9157 if (wk->wk_type == D_SBDEP) 9158 break; 9159 if (wk != NULL) 9160 return; 9161 sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS); 9162 workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump)); 9163 sbdep->sb_fs = fs; 9164 sbdep->sb_ump = ump; 9165 ACQUIRE_LOCK(&lk); 9166 WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list); 9167 FREE_LOCK(&lk); 9168 } 9169 9170 /* 9171 * Return the first unlinked inodedep which is ready to be the head of the 9172 * list. The inodedep and all those after it must have valid next pointers.
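 *
 * As the flags are used by this list code, UNLINKNEXT means this
 * inode's on-disk di_freelink is known to be valid, while UNLINKPREV
 * means the on-disk pointer to this inode (a predecessor's
 * di_freelink or the superblock's fs_sujfree) is known to be valid.
 * For example, with the newest entry at the in-memory head:
 *
 *	head:  C()  ->  B(NEXT)  ->  A(NEXT|PREV)  :tail
 *
 * this routine returns B: B and A have written next pointers, while
 * C's di_freelink has not yet made it to disk.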
9173 */ 9174 static struct inodedep * 9175 first_unlinked_inodedep(ump) 9176 struct ufsmount *ump; 9177 { 9178 struct inodedep *inodedep; 9179 struct inodedep *idp; 9180 9181 mtx_assert(&lk, MA_OWNED); 9182 for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst); 9183 inodedep; inodedep = idp) { 9184 if ((inodedep->id_state & UNLINKNEXT) == 0) 9185 return (NULL); 9186 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9187 if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0) 9188 break; 9189 if ((inodedep->id_state & UNLINKPREV) == 0) 9190 break; 9191 } 9192 return (inodedep); 9193 } 9194 9195 /* 9196 * Set the sujfree unlinked head pointer prior to writing a superblock. 9197 */ 9198 static void 9199 initiate_write_sbdep(sbdep) 9200 struct sbdep *sbdep; 9201 { 9202 struct inodedep *inodedep; 9203 struct fs *bpfs; 9204 struct fs *fs; 9205 9206 bpfs = sbdep->sb_fs; 9207 fs = sbdep->sb_ump->um_fs; 9208 inodedep = first_unlinked_inodedep(sbdep->sb_ump); 9209 if (inodedep) { 9210 fs->fs_sujfree = inodedep->id_ino; 9211 inodedep->id_state |= UNLINKPREV; 9212 } else 9213 fs->fs_sujfree = 0; 9214 bpfs->fs_sujfree = fs->fs_sujfree; 9215 } 9216 9217 /* 9218 * After a superblock is written, determine whether it must be written again 9219 * due to a changing unlinked list head. 9220 */ 9221 static int 9222 handle_written_sbdep(sbdep, bp) 9223 struct sbdep *sbdep; 9224 struct buf *bp; 9225 { 9226 struct inodedep *inodedep; 9227 struct mount *mp; 9228 struct fs *fs; 9229 9230 mtx_assert(&lk, MA_OWNED); 9231 fs = sbdep->sb_fs; 9232 mp = UFSTOVFS(sbdep->sb_ump); 9233 /* 9234 * If the superblock doesn't match the in-memory list, start over. 9235 */ 9236 inodedep = first_unlinked_inodedep(sbdep->sb_ump); 9237 if ((inodedep && fs->fs_sujfree != inodedep->id_ino) || 9238 (inodedep == NULL && fs->fs_sujfree != 0)) { 9239 bdirty(bp); 9240 return (1); 9241 } 9242 WORKITEM_FREE(sbdep, D_SBDEP); 9243 if (fs->fs_sujfree == 0) 9244 return (0); 9245 /* 9246 * Now that we have a record of this inode in stable store, allow it 9247 * to be written to free up pending work. Inodes may see a lot of 9248 * write activity after they are unlinked, which we must not hold up. 9249 */ 9250 for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { 9251 if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS) 9252 panic("handle_written_sbdep: Bad inodedep %p (0x%X)", 9253 inodedep, inodedep->id_state); 9254 if (inodedep->id_state & UNLINKONLIST) 9255 break; 9256 inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST; 9257 } 9258 9259 return (0); 9260 } 9261 9262 /* 9263 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list. 9264 */ 9265 static void 9266 unlinked_inodedep(mp, inodedep) 9267 struct mount *mp; 9268 struct inodedep *inodedep; 9269 { 9270 struct ufsmount *ump; 9271 9272 mtx_assert(&lk, MA_OWNED); 9273 if (MOUNTEDSUJ(mp) == 0) 9274 return; 9275 ump = VFSTOUFS(mp); 9276 ump->um_fs->fs_fmod = 1; 9277 if (inodedep->id_state & UNLINKED) 9278 panic("unlinked_inodedep: %p already unlinked\n", inodedep); 9279 inodedep->id_state |= UNLINKED; 9280 TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked); 9281 } 9282 9283 /* 9284 * Remove an inodedep from the unlinked inodedep list. This may require 9285 * disk writes if the inode has made it that far.
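 *
 * On disk the list is singly linked: the superblock's fs_sujfree names
 * the first unlinked inode and each inode's di_freelink names the
 * next, so removing inode I means rewriting whatever points at it:
 *
 *	fs_sujfree -> A -> I -> B	(before)
 *	fs_sujfree -> A ------> B	(after A's di_freelink is
 *					 rewritten to name B)
 *
 * That rewrite is the bread()/bwrite() of the predecessor done below.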
9286 */ 9287 static void 9288 clear_unlinked_inodedep(inodedep) 9289 struct inodedep *inodedep; 9290 { 9291 struct ufsmount *ump; 9292 struct inodedep *idp; 9293 struct inodedep *idn; 9294 struct fs *fs; 9295 struct buf *bp; 9296 ino_t ino; 9297 ino_t nino; 9298 ino_t pino; 9299 int error; 9300 9301 ump = VFSTOUFS(inodedep->id_list.wk_mp); 9302 fs = ump->um_fs; 9303 ino = inodedep->id_ino; 9304 error = 0; 9305 for (;;) { 9306 mtx_assert(&lk, MA_OWNED); 9307 KASSERT((inodedep->id_state & UNLINKED) != 0, 9308 ("clear_unlinked_inodedep: inodedep %p not unlinked", 9309 inodedep)); 9310 /* 9311 * If nothing has yet been written simply remove us from 9312 * the in memory list and return. This is the most common 9313 * case where handle_workitem_remove() loses the final 9314 * reference. 9315 */ 9316 if ((inodedep->id_state & UNLINKLINKS) == 0) 9317 break; 9318 /* 9319 * If we have a NEXT pointer and no PREV pointer we can simply 9320 * clear NEXT's PREV and remove ourselves from the list. Be 9321 * careful not to clear PREV if the superblock points at 9322 * next as well. 9323 */ 9324 idn = TAILQ_NEXT(inodedep, id_unlinked); 9325 if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) { 9326 if (idn && fs->fs_sujfree != idn->id_ino) 9327 idn->id_state &= ~UNLINKPREV; 9328 break; 9329 } 9330 /* 9331 * Here we have an inodedep which is actually linked into 9332 * the list. We must remove it by forcing a write to the 9333 * link before us, whether it be the superblock or an inode. 9334 * Unfortunately the list may change while we're waiting 9335 * on the buf lock for either resource so we must loop until 9336 * we lock the right one. If both the superblock and an 9337 * inode point to this inode we must clear the inode first 9338 * followed by the superblock. 9339 */ 9340 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9341 pino = 0; 9342 if (idp && (idp->id_state & UNLINKNEXT)) 9343 pino = idp->id_ino; 9344 FREE_LOCK(&lk); 9345 if (pino == 0) 9346 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 9347 (int)fs->fs_sbsize, 0, 0, 0); 9348 else 9349 error = bread(ump->um_devvp, 9350 fsbtodb(fs, ino_to_fsba(fs, pino)), 9351 (int)fs->fs_bsize, NOCRED, &bp); 9352 ACQUIRE_LOCK(&lk); 9353 if (error) 9354 break; 9355 /* If the list has changed restart the loop. */ 9356 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9357 nino = 0; 9358 if (idp && (idp->id_state & UNLINKNEXT)) 9359 nino = idp->id_ino; 9360 if (nino != pino || 9361 (inodedep->id_state & UNLINKPREV) != UNLINKPREV) { 9362 FREE_LOCK(&lk); 9363 brelse(bp); 9364 ACQUIRE_LOCK(&lk); 9365 continue; 9366 } 9367 nino = 0; 9368 idn = TAILQ_NEXT(inodedep, id_unlinked); 9369 if (idn) 9370 nino = idn->id_ino; 9371 /* 9372 * Remove us from the in memory list. After this we cannot 9373 * access the inodedep. 9374 */ 9375 KASSERT((inodedep->id_state & UNLINKED) != 0, 9376 ("clear_unlinked_inodedep: inodedep %p not unlinked", 9377 inodedep)); 9378 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST); 9379 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked); 9380 FREE_LOCK(&lk); 9381 /* 9382 * The predecessor's next pointer is manually updated here 9383 * so that the NEXT flag is never cleared for an element 9384 * that is in the list. 
9385 */ 9386 if (pino == 0) { 9387 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 9388 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 9389 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, 9390 bp); 9391 } else if (fs->fs_magic == FS_UFS1_MAGIC) 9392 ((struct ufs1_dinode *)bp->b_data + 9393 ino_to_fsbo(fs, pino))->di_freelink = nino; 9394 else 9395 ((struct ufs2_dinode *)bp->b_data + 9396 ino_to_fsbo(fs, pino))->di_freelink = nino; 9397 /* 9398 * If the bwrite fails we have no recourse to recover. The 9399 * filesystem is corrupted already. 9400 */ 9401 bwrite(bp); 9402 ACQUIRE_LOCK(&lk); 9403 /* 9404 * If the superblock pointer still needs to be cleared force 9405 * a write here. 9406 */ 9407 if (fs->fs_sujfree == ino) { 9408 FREE_LOCK(&lk); 9409 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 9410 (int)fs->fs_sbsize, 0, 0, 0); 9411 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 9412 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 9413 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, 9414 bp); 9415 bwrite(bp); 9416 ACQUIRE_LOCK(&lk); 9417 } 9418 9419 if (fs->fs_sujfree != ino) 9420 return; 9421 panic("clear_unlinked_inodedep: Failed to clear free head"); 9422 } 9423 if (inodedep->id_ino == fs->fs_sujfree) 9424 panic("clear_unlinked_inodedep: Freeing head of free list"); 9425 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST); 9426 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked); 9427 return; 9428 } 9429 9430 /* 9431 * This workitem decrements the inode's link count. 9432 * If the link count reaches zero, the file is removed. 9433 */ 9434 static int 9435 handle_workitem_remove(dirrem, flags) 9436 struct dirrem *dirrem; 9437 int flags; 9438 { 9439 struct inodedep *inodedep; 9440 struct workhead dotdotwk; 9441 struct worklist *wk; 9442 struct ufsmount *ump; 9443 struct mount *mp; 9444 struct vnode *vp; 9445 struct inode *ip; 9446 ino_t oldinum; 9447 9448 if (dirrem->dm_state & ONWORKLIST) 9449 panic("handle_workitem_remove: dirrem %p still on worklist", 9450 dirrem); 9451 oldinum = dirrem->dm_oldinum; 9452 mp = dirrem->dm_list.wk_mp; 9453 ump = VFSTOUFS(mp); 9454 flags |= LK_EXCLUSIVE; 9455 if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0) 9456 return (EBUSY); 9457 ip = VTOI(vp); 9458 ACQUIRE_LOCK(&lk); 9459 if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0) 9460 panic("handle_workitem_remove: lost inodedep"); 9461 if (dirrem->dm_state & ONDEPLIST) 9462 LIST_REMOVE(dirrem, dm_inonext); 9463 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), 9464 ("handle_workitem_remove: Journal entries not written.")); 9465 9466 /* 9467 * Move all dependencies waiting on the remove to complete 9468 * from the dirrem to the inode inowait list to be completed 9469 * after the inode has been updated and written to disk. Any 9470 * marked MKDIR_PARENT are saved to be completed when the .. ref 9471 * is removed. 9472 */ 9473 LIST_INIT(&dotdotwk); 9474 while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) { 9475 WORKLIST_REMOVE(wk); 9476 if (wk->wk_state & MKDIR_PARENT) { 9477 wk->wk_state &= ~MKDIR_PARENT; 9478 WORKLIST_INSERT(&dotdotwk, wk); 9479 continue; 9480 } 9481 WORKLIST_INSERT(&inodedep->id_inowait, wk); 9482 } 9483 LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list); 9484 /* 9485 * Normal file deletion. 
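 * For example, unlinking a plain file that had two names simply drops
 * i_nlink from 2 to 1 here, while removing a directory (further below)
 * costs the victim two links (its name and its ".") and queues a
 * followup dirrem so the parent later loses the link contributed
 * by "..".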
9486 */ 9487 if ((dirrem->dm_state & RMDIR) == 0) { 9488 ip->i_nlink--; 9489 DIP_SET(ip, i_nlink, ip->i_nlink); 9490 ip->i_flag |= IN_CHANGE; 9491 if (ip->i_nlink < ip->i_effnlink) 9492 panic("handle_workitem_remove: bad file delta"); 9493 if (ip->i_nlink == 0) 9494 unlinked_inodedep(mp, inodedep); 9495 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 9496 KASSERT(LIST_EMPTY(&dirrem->dm_jwork), 9497 ("handle_workitem_remove: worklist not empty. %s", 9498 TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type))); 9499 WORKITEM_FREE(dirrem, D_DIRREM); 9500 FREE_LOCK(&lk); 9501 goto out; 9502 } 9503 /* 9504 * Directory deletion. Decrement reference count for both the 9505 * just deleted parent directory entry and the reference for ".". 9506 * Arrange to have the reference count on the parent decremented 9507 * to account for the loss of "..". 9508 */ 9509 ip->i_nlink -= 2; 9510 DIP_SET(ip, i_nlink, ip->i_nlink); 9511 ip->i_flag |= IN_CHANGE; 9512 if (ip->i_nlink < ip->i_effnlink) 9513 panic("handle_workitem_remove: bad dir delta"); 9514 if (ip->i_nlink == 0) 9515 unlinked_inodedep(mp, inodedep); 9516 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 9517 /* 9518 * Rename a directory to a new parent. Since we are both deleting 9519 * and creating a new directory entry, the link count on the new 9520 * directory should not change. Thus we skip the followup dirrem. 9521 */ 9522 if (dirrem->dm_state & DIRCHG) { 9523 KASSERT(LIST_EMPTY(&dirrem->dm_jwork), 9524 ("handle_workitem_remove: DIRCHG and worklist not empty.")); 9525 WORKITEM_FREE(dirrem, D_DIRREM); 9526 FREE_LOCK(&lk); 9527 goto out; 9528 } 9529 dirrem->dm_state = ONDEPLIST; 9530 dirrem->dm_oldinum = dirrem->dm_dirinum; 9531 /* 9532 * Place the dirrem on the parent's dirremhd list. 9533 */ 9534 if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0) 9535 panic("handle_workitem_remove: lost dir inodedep"); 9536 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); 9537 /* 9538 * If the allocated inode has never been written to disk, then 9539 * the on-disk inode is zero'ed and we can remove the file 9540 * immediately. When journaling, if the inode has been marked 9541 * unlinked and not DEPCOMPLETE, we know it can never be written. 9542 */ 9543 inodedep_lookup(mp, oldinum, 0, &inodedep); 9544 if (inodedep == NULL || 9545 (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED || 9546 check_inode_unwritten(inodedep)) { 9547 FREE_LOCK(&lk); 9548 vput(vp); 9549 return handle_workitem_remove(dirrem, flags); 9550 } 9551 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); 9552 FREE_LOCK(&lk); 9553 ip->i_flag |= IN_CHANGE; 9554 out: 9555 ffs_update(vp, 0); 9556 vput(vp); 9557 return (0); 9558 } 9559 9560 /* 9561 * Inode de-allocation dependencies. 9562 * 9563 * When an inode's link count is reduced to zero, it can be de-allocated. We 9564 * found it convenient to postpone de-allocation until after the inode is 9565 * written to disk with its new link count (zero). At this point, all of the 9566 * on-disk inode's block pointers are nullified and, with careful dependency 9567 * list ordering, all dependencies related to the inode will be satisfied and 9568 * the corresponding dependency structures de-allocated. So, if/when the 9569 * inode is reused, there will be no mixing of old dependencies with new 9570 * ones. This artificial dependency is set up by the block de-allocation 9571 * procedure above (softdep_setup_freeblocks) and completed by the 9572 * following procedure.
9573 */ 9574 static void 9575 handle_workitem_freefile(freefile) 9576 struct freefile *freefile; 9577 { 9578 struct workhead wkhd; 9579 struct fs *fs; 9580 struct inodedep *idp; 9581 struct ufsmount *ump; 9582 int error; 9583 9584 ump = VFSTOUFS(freefile->fx_list.wk_mp); 9585 fs = ump->um_fs; 9586 #ifdef DEBUG 9587 ACQUIRE_LOCK(&lk); 9588 error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp); 9589 FREE_LOCK(&lk); 9590 if (error) 9591 panic("handle_workitem_freefile: inodedep %p survived", idp); 9592 #endif 9593 UFS_LOCK(ump); 9594 fs->fs_pendinginodes -= 1; 9595 UFS_UNLOCK(ump); 9596 LIST_INIT(&wkhd); 9597 LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list); 9598 if ((error = ffs_freefile(ump, fs, freefile->fx_devvp, 9599 freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0) 9600 softdep_error("handle_workitem_freefile", error); 9601 ACQUIRE_LOCK(&lk); 9602 WORKITEM_FREE(freefile, D_FREEFILE); 9603 FREE_LOCK(&lk); 9604 } 9605 9606 9607 /* 9608 * Helper function which unlinks marker element from work list and returns 9609 * the next element on the list. 9610 */ 9611 static __inline struct worklist * 9612 markernext(struct worklist *marker) 9613 { 9614 struct worklist *next; 9615 9616 next = LIST_NEXT(marker, wk_list); 9617 LIST_REMOVE(marker, wk_list); 9618 return next; 9619 } 9620 9621 /* 9622 * Disk writes. 9623 * 9624 * The dependency structures constructed above are most actively used when file 9625 * system blocks are written to disk. No constraints are placed on when a 9626 * block can be written, but unsatisfied update dependencies are made safe by 9627 * modifying (or replacing) the source memory for the duration of the disk 9628 * write. When the disk write completes, the memory block is again brought 9629 * up-to-date. 9630 * 9631 * In-core inode structure reclamation. 9632 * 9633 * Because there are a finite number of "in-core" inode structures, they are 9634 * reused regularly. By transferring all inode-related dependencies to the 9635 * in-memory inode block and indexing them separately (via "inodedep"s), we 9636 * can allow "in-core" inode structures to be reused at any time and avoid 9637 * any increase in contention. 9638 * 9639 * Called just before entering the device driver to initiate a new disk I/O. 9640 * The buffer must be locked, thus, no I/O completion operations can occur 9641 * while we are manipulating its associated dependencies. 9642 */ 9643 static void 9644 softdep_disk_io_initiation(bp) 9645 struct buf *bp; /* structure describing disk write to occur */ 9646 { 9647 struct worklist *wk; 9648 struct worklist marker; 9649 struct inodedep *inodedep; 9650 struct freeblks *freeblks; 9651 struct jblkdep *jblkdep; 9652 struct newblk *newblk; 9653 9654 /* 9655 * We only care about write operations. There should never 9656 * be dependencies for reads. 9657 */ 9658 if (bp->b_iocmd != BIO_WRITE) 9659 panic("softdep_disk_io_initiation: not write"); 9660 9661 if (bp->b_vflags & BV_BKGRDINPROG) 9662 panic("softdep_disk_io_initiation: Writing buffer with " 9663 "background write in progress: %p", bp); 9664 9665 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 9666 PHOLD(curproc); /* Don't swap out kernel stack */ 9667 9668 ACQUIRE_LOCK(&lk); 9669 /* 9670 * Do any necessary pre-I/O processing. 
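 *
 * The walk below threads a marker through b_dep so that the current
 * position survives any sleep that reshapes the list; condensed, the
 * pattern is:
 *
 *	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
 *	     wk = markernext(&marker)) {
 *		LIST_INSERT_AFTER(wk, &marker, wk_list);
 *		... handle wk, possibly sleeping in jwait() ...
 *	}
 *
 * To revisit an item after sleeping, the marker is simply moved to
 * just before it.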
9671 */ 9672 for (wk = LIST_FIRST(&bp->b_dep); wk != NULL; 9673 wk = markernext(&marker)) { 9674 LIST_INSERT_AFTER(wk, &marker, wk_list); 9675 switch (wk->wk_type) { 9676 9677 case D_PAGEDEP: 9678 initiate_write_filepage(WK_PAGEDEP(wk), bp); 9679 continue; 9680 9681 case D_INODEDEP: 9682 inodedep = WK_INODEDEP(wk); 9683 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) 9684 initiate_write_inodeblock_ufs1(inodedep, bp); 9685 else 9686 initiate_write_inodeblock_ufs2(inodedep, bp); 9687 continue; 9688 9689 case D_INDIRDEP: 9690 initiate_write_indirdep(WK_INDIRDEP(wk), bp); 9691 continue; 9692 9693 case D_BMSAFEMAP: 9694 initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp); 9695 continue; 9696 9697 case D_JSEG: 9698 WK_JSEG(wk)->js_buf = NULL; 9699 continue; 9700 9701 case D_FREEBLKS: 9702 freeblks = WK_FREEBLKS(wk); 9703 jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd); 9704 /* 9705 * We have to wait for the freeblks to be journaled 9706 * before we can write an inodeblock with updated 9707 * pointers. Be careful to arrange the marker so 9708 * we revisit the freeblks if it's not removed by 9709 * the first jwait(). 9710 */ 9711 if (jblkdep != NULL) { 9712 LIST_REMOVE(&marker, wk_list); 9713 LIST_INSERT_BEFORE(wk, &marker, wk_list); 9714 jwait(&jblkdep->jb_list, MNT_WAIT); 9715 } 9716 continue; 9717 case D_ALLOCDIRECT: 9718 case D_ALLOCINDIR: 9719 /* 9720 * We have to wait for the jnewblk to be journaled 9721 * before we can write to a block if the contents 9722 * may be confused with an earlier file's indirect 9723 * at recovery time. Handle the marker as described 9724 * above. 9725 */ 9726 newblk = WK_NEWBLK(wk); 9727 if (newblk->nb_jnewblk != NULL && 9728 indirblk_lookup(newblk->nb_list.wk_mp, 9729 newblk->nb_newblkno)) { 9730 LIST_REMOVE(&marker, wk_list); 9731 LIST_INSERT_BEFORE(wk, &marker, wk_list); 9732 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 9733 } 9734 continue; 9735 9736 case D_SBDEP: 9737 initiate_write_sbdep(WK_SBDEP(wk)); 9738 continue; 9739 9740 case D_MKDIR: 9741 case D_FREEWORK: 9742 case D_FREEDEP: 9743 case D_JSEGDEP: 9744 continue; 9745 9746 default: 9747 panic("handle_disk_io_initiation: Unexpected type %s", 9748 TYPENAME(wk->wk_type)); 9749 /* NOTREACHED */ 9750 } 9751 } 9752 FREE_LOCK(&lk); 9753 PRELE(curproc); /* Allow swapout of kernel stack */ 9754 } 9755 9756 /* 9757 * Called from within the procedure above to deal with unsatisfied 9758 * allocation dependencies in a directory. The buffer must be locked, 9759 * thus, no I/O completion operations can occur while we are 9760 * manipulating its associated dependencies. 9761 */ 9762 static void 9763 initiate_write_filepage(pagedep, bp) 9764 struct pagedep *pagedep; 9765 struct buf *bp; 9766 { 9767 struct jremref *jremref; 9768 struct jmvref *jmvref; 9769 struct dirrem *dirrem; 9770 struct diradd *dap; 9771 struct direct *ep; 9772 int i; 9773 9774 if (pagedep->pd_state & IOSTARTED) { 9775 /* 9776 * This can only happen if there is a driver that does not 9777 * understand chaining. Here biodone will reissue the call 9778 * to strategy for the incomplete buffers. 9779 */ 9780 printf("initiate_write_filepage: already started\n"); 9781 return; 9782 } 9783 pagedep->pd_state |= IOSTARTED; 9784 /* 9785 * Wait for all journal remove dependencies to hit the disk. 9786 * We can not allow any potentially conflicting directory adds 9787 * to be visible before removes and rollback is too difficult. 9788 * lk may be dropped and re-acquired, however we hold the buf 9789 * locked so the dependency can not go away. 
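 * For example, if the removal of a name and the creation of a new
 * entry reusing it both reached this directory block before their
 * journal records, recovery could see the new name with no record
 * that the old one was ever removed; hence each jremref and jmvref
 * below is forced out with jwait() before the page itself is written.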
9790 */ 9791 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) 9792 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) 9793 jwait(&jremref->jr_list, MNT_WAIT); 9794 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) 9795 jwait(&jmvref->jm_list, MNT_WAIT); 9796 for (i = 0; i < DAHASHSZ; i++) { 9797 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 9798 ep = (struct direct *) 9799 ((char *)bp->b_data + dap->da_offset); 9800 if (ep->d_ino != dap->da_newinum) 9801 panic("%s: dir inum %ju != new %ju", 9802 "initiate_write_filepage", 9803 (uintmax_t)ep->d_ino, 9804 (uintmax_t)dap->da_newinum); 9805 if (dap->da_state & DIRCHG) 9806 ep->d_ino = dap->da_previous->dm_oldinum; 9807 else 9808 ep->d_ino = 0; 9809 dap->da_state &= ~ATTACHED; 9810 dap->da_state |= UNDONE; 9811 } 9812 } 9813 } 9814 9815 /* 9816 * Version of initiate_write_inodeblock that handles UFS1 dinodes. 9817 * Note that any bug fixes made to this routine must be done in the 9818 * version found below. 9819 * 9820 * Called from within the procedure above to deal with unsatisfied 9821 * allocation dependencies in an inodeblock. The buffer must be 9822 * locked, thus, no I/O completion operations can occur while we 9823 * are manipulating its associated dependencies. 9824 */ 9825 static void 9826 initiate_write_inodeblock_ufs1(inodedep, bp) 9827 struct inodedep *inodedep; 9828 struct buf *bp; /* The inode block */ 9829 { 9830 struct allocdirect *adp, *lastadp; 9831 struct ufs1_dinode *dp; 9832 struct ufs1_dinode *sip; 9833 struct inoref *inoref; 9834 struct fs *fs; 9835 ufs_lbn_t i; 9836 #ifdef INVARIANTS 9837 ufs_lbn_t prevlbn = 0; 9838 #endif 9839 int deplist; 9840 9841 if (inodedep->id_state & IOSTARTED) 9842 panic("initiate_write_inodeblock_ufs1: already started"); 9843 inodedep->id_state |= IOSTARTED; 9844 fs = inodedep->id_fs; 9845 dp = (struct ufs1_dinode *)bp->b_data + 9846 ino_to_fsbo(fs, inodedep->id_ino); 9847 9848 /* 9849 * If we're on the unlinked list but have not yet written our 9850 * next pointer initialize it here. 9851 */ 9852 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { 9853 struct inodedep *inon; 9854 9855 inon = TAILQ_NEXT(inodedep, id_unlinked); 9856 dp->di_freelink = inon ? inon->id_ino : 0; 9857 } 9858 /* 9859 * If the bitmap is not yet written, then the allocated 9860 * inode cannot be written to disk. 9861 */ 9862 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 9863 if (inodedep->id_savedino1 != NULL) 9864 panic("initiate_write_inodeblock_ufs1: I/O underway"); 9865 FREE_LOCK(&lk); 9866 sip = malloc(sizeof(struct ufs1_dinode), 9867 M_SAVEDINO, M_SOFTDEP_FLAGS); 9868 ACQUIRE_LOCK(&lk); 9869 inodedep->id_savedino1 = sip; 9870 *inodedep->id_savedino1 = *dp; 9871 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 9872 dp->di_gen = inodedep->id_savedino1->di_gen; 9873 dp->di_freelink = inodedep->id_savedino1->di_freelink; 9874 return; 9875 } 9876 /* 9877 * If no dependencies, then there is nothing to roll back. 9878 */ 9879 inodedep->id_savedsize = dp->di_size; 9880 inodedep->id_savedextsize = 0; 9881 inodedep->id_savednlink = dp->di_nlink; 9882 if (TAILQ_EMPTY(&inodedep->id_inoupdt) && 9883 TAILQ_EMPTY(&inodedep->id_inoreflst)) 9884 return; 9885 /* 9886 * Revert the link count to that of the first unwritten journal entry. 9887 */ 9888 inoref = TAILQ_FIRST(&inodedep->id_inoreflst); 9889 if (inoref) 9890 dp->di_nlink = inoref->if_nlink; 9891 /* 9892 * Set the dependencies to busy. 
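 *
 * "Busy" here is the standard rollback flip applied to each
 * allocdirect at write initiation:
 *
 *	adp->ad_state &= ~ATTACHED;
 *	adp->ad_state |= UNDONE;
 *
 * The write-completion path reverses it, rolling the pointers forward
 * once the block is safely on disk.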
9893 */ 9894 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 9895 adp = TAILQ_NEXT(adp, ad_next)) { 9896 #ifdef INVARIANTS 9897 if (deplist != 0 && prevlbn >= adp->ad_offset) 9898 panic("softdep_write_inodeblock: lbn order"); 9899 prevlbn = adp->ad_offset; 9900 if (adp->ad_offset < NDADDR && 9901 dp->di_db[adp->ad_offset] != adp->ad_newblkno) 9902 panic("%s: direct pointer #%jd mismatch %d != %jd", 9903 "softdep_write_inodeblock", 9904 (intmax_t)adp->ad_offset, 9905 dp->di_db[adp->ad_offset], 9906 (intmax_t)adp->ad_newblkno); 9907 if (adp->ad_offset >= NDADDR && 9908 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno) 9909 panic("%s: indirect pointer #%jd mismatch %d != %jd", 9910 "softdep_write_inodeblock", 9911 (intmax_t)adp->ad_offset - NDADDR, 9912 dp->di_ib[adp->ad_offset - NDADDR], 9913 (intmax_t)adp->ad_newblkno); 9914 deplist |= 1 << adp->ad_offset; 9915 if ((adp->ad_state & ATTACHED) == 0) 9916 panic("softdep_write_inodeblock: Unknown state 0x%x", 9917 adp->ad_state); 9918 #endif /* INVARIANTS */ 9919 adp->ad_state &= ~ATTACHED; 9920 adp->ad_state |= UNDONE; 9921 } 9922 /* 9923 * The on-disk inode cannot claim to be any larger than the last 9924 * fragment that has been written. Otherwise, the on-disk inode 9925 * might have fragments that were not the last block in the file, 9926 * which would corrupt the filesystem. 9927 */ 9928 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 9929 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 9930 if (adp->ad_offset >= NDADDR) 9931 break; 9932 dp->di_db[adp->ad_offset] = adp->ad_oldblkno; 9933 /* keep going until hitting a rollback to a frag */ 9934 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 9935 continue; 9936 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 9937 for (i = adp->ad_offset + 1; i < NDADDR; i++) { 9938 #ifdef INVARIANTS 9939 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) 9940 panic("softdep_write_inodeblock: lost dep1"); 9941 #endif /* INVARIANTS */ 9942 dp->di_db[i] = 0; 9943 } 9944 for (i = 0; i < NIADDR; i++) { 9945 #ifdef INVARIANTS 9946 if (dp->di_ib[i] != 0 && 9947 (deplist & ((1 << NDADDR) << i)) == 0) 9948 panic("softdep_write_inodeblock: lost dep2"); 9949 #endif /* INVARIANTS */ 9950 dp->di_ib[i] = 0; 9951 } 9952 return; 9953 } 9954 /* 9955 * If we have zero'ed out the last allocated block of the file, 9956 * roll back the size to the last currently allocated block. 9957 * We know that this last allocated block is full-sized as 9958 * we already checked for fragments in the loop above. 9959 */ 9960 if (lastadp != NULL && 9961 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 9962 for (i = lastadp->ad_offset; i >= 0; i--) 9963 if (dp->di_db[i] != 0) 9964 break; 9965 dp->di_size = (i + 1) * fs->fs_bsize; 9966 } 9967 /* 9968 * The only dependencies are for indirect blocks. 9969 * 9970 * The file size for indirect block additions is not guaranteed. 9971 * Such a guarantee would be non-trivial to achieve. The conventional 9972 * synchronous write implementation also does not make this guarantee. 9973 * Fsck should catch and fix discrepancies. Arguably, the file size 9974 * can be over-estimated without destroying integrity when the file 9975 * moves into the indirect blocks (i.e., is large). If we want to 9976 * postpone fsck, we are stuck with this argument.
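 *
 * Concretely: di_size may claim bytes whose indirect pointers are
 * zeroed just below, so recovery sees a file that is merely shorter
 * than its length claims, which fsck can repair, rather than one
 * referencing blocks it does not own.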
9977 */ 9978 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 9979 dp->di_ib[adp->ad_offset - NDADDR] = 0; 9980 } 9981 9982 /* 9983 * Version of initiate_write_inodeblock that handles UFS2 dinodes. 9984 * Note that any bug fixes made to this routine must be done in the 9985 * version found above. 9986 * 9987 * Called from within the procedure above to deal with unsatisfied 9988 * allocation dependencies in an inodeblock. The buffer must be 9989 * locked, thus, no I/O completion operations can occur while we 9990 * are manipulating its associated dependencies. 9991 */ 9992 static void 9993 initiate_write_inodeblock_ufs2(inodedep, bp) 9994 struct inodedep *inodedep; 9995 struct buf *bp; /* The inode block */ 9996 { 9997 struct allocdirect *adp, *lastadp; 9998 struct ufs2_dinode *dp; 9999 struct ufs2_dinode *sip; 10000 struct inoref *inoref; 10001 struct fs *fs; 10002 ufs_lbn_t i; 10003 #ifdef INVARIANTS 10004 ufs_lbn_t prevlbn = 0; 10005 #endif 10006 int deplist; 10007 10008 if (inodedep->id_state & IOSTARTED) 10009 panic("initiate_write_inodeblock_ufs2: already started"); 10010 inodedep->id_state |= IOSTARTED; 10011 fs = inodedep->id_fs; 10012 dp = (struct ufs2_dinode *)bp->b_data + 10013 ino_to_fsbo(fs, inodedep->id_ino); 10014 10015 /* 10016 * If we're on the unlinked list but have not yet written our 10017 * next pointer initialize it here. 10018 */ 10019 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { 10020 struct inodedep *inon; 10021 10022 inon = TAILQ_NEXT(inodedep, id_unlinked); 10023 dp->di_freelink = inon ? inon->id_ino : 0; 10024 } 10025 /* 10026 * If the bitmap is not yet written, then the allocated 10027 * inode cannot be written to disk. 10028 */ 10029 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 10030 if (inodedep->id_savedino2 != NULL) 10031 panic("initiate_write_inodeblock_ufs2: I/O underway"); 10032 FREE_LOCK(&lk); 10033 sip = malloc(sizeof(struct ufs2_dinode), 10034 M_SAVEDINO, M_SOFTDEP_FLAGS); 10035 ACQUIRE_LOCK(&lk); 10036 inodedep->id_savedino2 = sip; 10037 *inodedep->id_savedino2 = *dp; 10038 bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); 10039 dp->di_gen = inodedep->id_savedino2->di_gen; 10040 dp->di_freelink = inodedep->id_savedino2->di_freelink; 10041 return; 10042 } 10043 /* 10044 * If no dependencies, then there is nothing to roll back. 10045 */ 10046 inodedep->id_savedsize = dp->di_size; 10047 inodedep->id_savedextsize = dp->di_extsize; 10048 inodedep->id_savednlink = dp->di_nlink; 10049 if (TAILQ_EMPTY(&inodedep->id_inoupdt) && 10050 TAILQ_EMPTY(&inodedep->id_extupdt) && 10051 TAILQ_EMPTY(&inodedep->id_inoreflst)) 10052 return; 10053 /* 10054 * Revert the link count to that of the first unwritten journal entry. 10055 */ 10056 inoref = TAILQ_FIRST(&inodedep->id_inoreflst); 10057 if (inoref) 10058 dp->di_nlink = inoref->if_nlink; 10059 10060 /* 10061 * Set the ext data dependencies to busy. 
10062 */ 10063 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 10064 adp = TAILQ_NEXT(adp, ad_next)) { 10065 #ifdef INVARIANTS 10066 if (deplist != 0 && prevlbn >= adp->ad_offset) 10067 panic("softdep_write_inodeblock: lbn order"); 10068 prevlbn = adp->ad_offset; 10069 if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno) 10070 panic("%s: direct pointer #%jd mismatch %jd != %jd", 10071 "softdep_write_inodeblock", 10072 (intmax_t)adp->ad_offset, 10073 (intmax_t)dp->di_extb[adp->ad_offset], 10074 (intmax_t)adp->ad_newblkno); 10075 deplist |= 1 << adp->ad_offset; 10076 if ((adp->ad_state & ATTACHED) == 0) 10077 panic("softdep_write_inodeblock: Unknown state 0x%x", 10078 adp->ad_state); 10079 #endif /* INVARIANTS */ 10080 adp->ad_state &= ~ATTACHED; 10081 adp->ad_state |= UNDONE; 10082 } 10083 /* 10084 * The on-disk inode cannot claim to be any larger than the last 10085 * fragment that has been written. Otherwise, the on-disk inode 10086 * might have fragments that were not the last block in the ext 10087 * data, which would corrupt the filesystem. 10088 */ 10089 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 10090 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10091 dp->di_extb[adp->ad_offset] = adp->ad_oldblkno; 10092 /* keep going until hitting a rollback to a frag */ 10093 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10094 continue; 10095 dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10096 for (i = adp->ad_offset + 1; i < NXADDR; i++) { 10097 #ifdef INVARIANTS 10098 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0) 10099 panic("softdep_write_inodeblock: lost dep1"); 10100 #endif /* INVARIANTS */ 10101 dp->di_extb[i] = 0; 10102 } 10103 lastadp = NULL; 10104 break; 10105 } 10106 /* 10107 * If we have zero'ed out the last allocated block of the ext 10108 * data, roll back the size to the last currently allocated block. 10109 * We know that this last allocated block is full-sized as 10110 * we already checked for fragments in the loop above. 10111 */ 10112 if (lastadp != NULL && 10113 dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10114 for (i = lastadp->ad_offset; i >= 0; i--) 10115 if (dp->di_extb[i] != 0) 10116 break; 10117 dp->di_extsize = (i + 1) * fs->fs_bsize; 10118 } 10119 /* 10120 * Set the file data dependencies to busy.
10121 */ 10122 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10123 adp = TAILQ_NEXT(adp, ad_next)) { 10124 #ifdef INVARIANTS 10125 if (deplist != 0 && prevlbn >= adp->ad_offset) 10126 panic("softdep_write_inodeblock: lbn order"); 10127 if ((adp->ad_state & ATTACHED) == 0) 10128 panic("inodedep %p and adp %p not attached", inodedep, adp); 10129 prevlbn = adp->ad_offset; 10130 if (adp->ad_offset < NDADDR && 10131 dp->di_db[adp->ad_offset] != adp->ad_newblkno) 10132 panic("%s: direct pointer #%jd mismatch %jd != %jd", 10133 "softdep_write_inodeblock", 10134 (intmax_t)adp->ad_offset, 10135 (intmax_t)dp->di_db[adp->ad_offset], 10136 (intmax_t)adp->ad_newblkno); 10137 if (adp->ad_offset >= NDADDR && 10138 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno) 10139 panic("%s: indirect pointer #%jd mismatch %jd != %jd", 10140 "softdep_write_inodeblock", 10141 (intmax_t)adp->ad_offset - NDADDR, 10142 (intmax_t)dp->di_ib[adp->ad_offset - NDADDR], 10143 (intmax_t)adp->ad_newblkno); 10144 deplist |= 1 << adp->ad_offset; 10145 if ((adp->ad_state & ATTACHED) == 0) 10146 panic("softdep_write_inodeblock: Unknown state 0x%x", 10147 adp->ad_state); 10148 #endif /* INVARIANTS */ 10149 adp->ad_state &= ~ATTACHED; 10150 adp->ad_state |= UNDONE; 10151 } 10152 /* 10153 * The on-disk inode cannot claim to be any larger than the last 10154 * fragment that has been written. Otherwise, the on-disk inode 10155 * might have fragments that were not the last block in the file, 10156 * which would corrupt the filesystem. 10157 */ 10158 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10159 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10160 if (adp->ad_offset >= NDADDR) 10161 break; 10162 dp->di_db[adp->ad_offset] = adp->ad_oldblkno; 10163 /* keep going until hitting a rollback to a frag */ 10164 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10165 continue; 10166 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10167 for (i = adp->ad_offset + 1; i < NDADDR; i++) { 10168 #ifdef INVARIANTS 10169 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) 10170 panic("softdep_write_inodeblock: lost dep2"); 10171 #endif /* INVARIANTS */ 10172 dp->di_db[i] = 0; 10173 } 10174 for (i = 0; i < NIADDR; i++) { 10175 #ifdef INVARIANTS 10176 if (dp->di_ib[i] != 0 && 10177 (deplist & ((1 << NDADDR) << i)) == 0) 10178 panic("softdep_write_inodeblock: lost dep3"); 10179 #endif /* INVARIANTS */ 10180 dp->di_ib[i] = 0; 10181 } 10182 return; 10183 } 10184 /* 10185 * If we have zero'ed out the last allocated block of the file, 10186 * roll back the size to the last currently allocated block. 10187 * We know that this last allocated block is full-sized as 10188 * we already checked for fragments in the loop above. 10189 */ 10190 if (lastadp != NULL && 10191 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10192 for (i = lastadp->ad_offset; i >= 0; i--) 10193 if (dp->di_db[i] != 0) 10194 break; 10195 dp->di_size = (i + 1) * fs->fs_bsize; 10196 } 10197 /* 10198 * The only dependencies are for indirect blocks. 10199 * 10200 * The file size for indirect block additions is not guaranteed. 10201 * Such a guarantee would be non-trivial to achieve. The conventional 10202 * synchronous write implementation also does not make this guarantee. 10203 * Fsck should catch and fix discrepancies. Arguably, the file size 10204 * can be over-estimated without destroying integrity when the file 10205 * moves into the indirect blocks (i.e., is large).
If we want to 10206 * postpone fsck, we are stuck with this argument. 10207 */ 10208 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 10209 dp->di_ib[adp->ad_offset - NDADDR] = 0; 10210 } 10211 10212 /* 10213 * Cancel an indirdep as a result of truncation. Release all of the 10214 * children allocindirs and place their journal work on the appropriate 10215 * list. 10216 */ 10217 static void 10218 cancel_indirdep(indirdep, bp, freeblks) 10219 struct indirdep *indirdep; 10220 struct buf *bp; 10221 struct freeblks *freeblks; 10222 { 10223 struct allocindir *aip; 10224 10225 /* 10226 * None of the indirect pointers will ever be visible, 10227 * so they can simply be tossed. GOINGAWAY ensures 10228 * that allocated pointers will be saved in the buffer 10229 * cache until they are freed. Note that they will 10230 * only be able to be found by their physical address 10231 * since the inode mapping the logical address will 10232 * be gone. The save buffer used for the safe copy 10233 * was allocated in setup_allocindir_phase2 using 10234 * the physical address so it could be used for this 10235 * purpose. Hence we swap the safe copy with the real 10236 * copy, allowing the safe copy to be freed and holding 10237 * on to the real copy for later use in indir_trunc. 10238 */ 10239 if (indirdep->ir_state & GOINGAWAY) 10240 panic("cancel_indirdep: already gone"); 10241 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 10242 indirdep->ir_state |= DEPCOMPLETE; 10243 LIST_REMOVE(indirdep, ir_next); 10244 } 10245 indirdep->ir_state |= GOINGAWAY; 10246 VFSTOUFS(indirdep->ir_list.wk_mp)->um_numindirdeps += 1; 10247 /* 10248 * Pass in bp for blocks that still have journal writes 10249 * pending, so we can cancel them on their own. 10250 */ 10251 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 10252 cancel_allocindir(aip, bp, freeblks, 0); 10253 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) 10254 cancel_allocindir(aip, NULL, freeblks, 0); 10255 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) 10256 cancel_allocindir(aip, NULL, freeblks, 0); 10257 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != 0) 10258 cancel_allocindir(aip, NULL, freeblks, 0); 10259 /* 10260 * If there are pending partial truncations, we need to keep the 10261 * old block copy around until they complete. This is because 10262 * the current b_data is not a perfect superset of the available 10263 * blocks. 10264 */ 10265 if (TAILQ_EMPTY(&indirdep->ir_trunc)) 10266 bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount); 10267 else 10268 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 10269 WORKLIST_REMOVE(&indirdep->ir_list); 10270 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list); 10271 indirdep->ir_bp = NULL; 10272 indirdep->ir_freeblks = freeblks; 10273 } 10274 10275 /* 10276 * Free an indirdep once it no longer has new pointers to track.
10277 */ 10278 static void 10279 free_indirdep(indirdep) 10280 struct indirdep *indirdep; 10281 { 10282 10283 KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc), 10284 ("free_indirdep: Indir trunc list not empty.")); 10285 KASSERT(LIST_EMPTY(&indirdep->ir_completehd), 10286 ("free_indirdep: Complete head not empty.")); 10287 KASSERT(LIST_EMPTY(&indirdep->ir_writehd), 10288 ("free_indirdep: write head not empty.")); 10289 KASSERT(LIST_EMPTY(&indirdep->ir_donehd), 10290 ("free_indirdep: done head not empty.")); 10291 KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd), 10292 ("free_indirdep: deplist head not empty.")); 10293 KASSERT((indirdep->ir_state & DEPCOMPLETE), 10294 ("free_indirdep: %p still on newblk list.", indirdep)); 10295 KASSERT(indirdep->ir_saveddata == NULL, 10296 ("free_indirdep: %p still has saved data.", indirdep)); 10297 if (indirdep->ir_state & ONWORKLIST) 10298 WORKLIST_REMOVE(&indirdep->ir_list); 10299 WORKITEM_FREE(indirdep, D_INDIRDEP); 10300 } 10301 10302 /* 10303 * Called before a write to an indirdep. This routine is responsible for 10304 * rolling back pointers to a safe state which includes only those 10305 * allocindirs which have been completed. 10306 */ 10307 static void 10308 initiate_write_indirdep(indirdep, bp) 10309 struct indirdep *indirdep; 10310 struct buf *bp; 10311 { 10312 10313 indirdep->ir_state |= IOSTARTED; 10314 if (indirdep->ir_state & GOINGAWAY) 10315 panic("disk_io_initiation: indirdep gone"); 10316 /* 10317 * If there are no remaining dependencies, this will be writing 10318 * the real pointers. 10319 */ 10320 if (LIST_EMPTY(&indirdep->ir_deplisthd) && 10321 TAILQ_EMPTY(&indirdep->ir_trunc)) 10322 return; 10323 /* 10324 * Replace up-to-date version with safe version. 10325 */ 10326 if (indirdep->ir_saveddata == NULL) { 10327 FREE_LOCK(&lk); 10328 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, 10329 M_SOFTDEP_FLAGS); 10330 ACQUIRE_LOCK(&lk); 10331 } 10332 indirdep->ir_state &= ~ATTACHED; 10333 indirdep->ir_state |= UNDONE; 10334 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 10335 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 10336 bp->b_bcount); 10337 } 10338 10339 /* 10340 * Called when an inode has been cleared in a cg bitmap. This finally 10341 * eliminates any canceled jaddrefs 10342 */ 10343 void 10344 softdep_setup_inofree(mp, bp, ino, wkhd) 10345 struct mount *mp; 10346 struct buf *bp; 10347 ino_t ino; 10348 struct workhead *wkhd; 10349 { 10350 struct worklist *wk, *wkn; 10351 struct inodedep *inodedep; 10352 uint8_t *inosused; 10353 struct cg *cgp; 10354 struct fs *fs; 10355 10356 ACQUIRE_LOCK(&lk); 10357 fs = VFSTOUFS(mp)->um_fs; 10358 cgp = (struct cg *)bp->b_data; 10359 inosused = cg_inosused(cgp); 10360 if (isset(inosused, ino % fs->fs_ipg)) 10361 panic("softdep_setup_inofree: inode %ju not freed.", 10362 (uintmax_t)ino); 10363 if (inodedep_lookup(mp, ino, 0, &inodedep)) 10364 panic("softdep_setup_inofree: ino %ju has existing inodedep %p", 10365 (uintmax_t)ino, inodedep); 10366 if (wkhd) { 10367 LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) { 10368 if (wk->wk_type != D_JADDREF) 10369 continue; 10370 WORKLIST_REMOVE(wk); 10371 /* 10372 * We can free immediately even if the jaddref 10373 * isn't attached in a background write as now 10374 * the bitmaps are reconciled. 
10375 */ 10376 wk->wk_state |= COMPLETE | ATTACHED; 10377 free_jaddref(WK_JADDREF(wk)); 10378 } 10379 jwork_move(&bp->b_dep, wkhd); 10380 } 10381 FREE_LOCK(&lk); 10382 } 10383 10384 10385 /* 10386 * Called via ffs_blkfree() after a set of frags has been cleared from a cg 10387 * map. Any dependencies waiting for the write to clear are added to the 10388 * buf's list and any jnewblks that are being canceled are discarded 10389 * immediately. 10390 */ 10391 void 10392 softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 10393 struct mount *mp; 10394 struct buf *bp; 10395 ufs2_daddr_t blkno; 10396 int frags; 10397 struct workhead *wkhd; 10398 { 10399 struct bmsafemap *bmsafemap; 10400 struct jnewblk *jnewblk; 10401 struct worklist *wk; 10402 struct fs *fs; 10403 #ifdef SUJ_DEBUG 10404 uint8_t *blksfree; 10405 struct cg *cgp; 10406 ufs2_daddr_t jstart; 10407 ufs2_daddr_t jend; 10408 ufs2_daddr_t end; 10409 long bno; 10410 int i; 10411 #endif 10412 10413 CTR3(KTR_SUJ, 10414 "softdep_setup_blkfree: blkno %jd frags %d wk head %p", 10415 blkno, frags, wkhd); 10416 10417 ACQUIRE_LOCK(&lk); 10418 /* Lookup the bmsafemap so we track when it is dirty. */ 10419 fs = VFSTOUFS(mp)->um_fs; 10420 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10421 /* 10422 * Detach any jnewblks which have been canceled. They must linger 10423 * until the bitmap is cleared again by ffs_blkfree() to prevent 10424 * an unjournaled allocation from hitting the disk. 10425 */ 10426 if (wkhd) { 10427 while ((wk = LIST_FIRST(wkhd)) != NULL) { 10428 CTR2(KTR_SUJ, 10429 "softdep_setup_blkfree: blkno %jd wk type %d", 10430 blkno, wk->wk_type); 10431 WORKLIST_REMOVE(wk); 10432 if (wk->wk_type != D_JNEWBLK) { 10433 WORKLIST_INSERT(&bmsafemap->sm_freehd, wk); 10434 continue; 10435 } 10436 jnewblk = WK_JNEWBLK(wk); 10437 KASSERT(jnewblk->jn_state & GOINGAWAY, 10438 ("softdep_setup_blkfree: jnewblk not canceled.")); 10439 #ifdef SUJ_DEBUG 10440 /* 10441 * Assert that this block is free in the bitmap 10442 * before we discard the jnewblk. 10443 */ 10444 cgp = (struct cg *)bp->b_data; 10445 blksfree = cg_blksfree(cgp); 10446 bno = dtogd(fs, jnewblk->jn_blkno); 10447 for (i = jnewblk->jn_oldfrags; 10448 i < jnewblk->jn_frags; i++) { 10449 if (isset(blksfree, bno + i)) 10450 continue; 10451 panic("softdep_setup_blkfree: not free"); 10452 } 10453 #endif 10454 /* 10455 * Even if it's not attached we can free immediately 10456 * as the new bitmap is correct. 10457 */ 10458 wk->wk_state |= COMPLETE | ATTACHED; 10459 free_jnewblk(jnewblk); 10460 } 10461 } 10462 10463 #ifdef SUJ_DEBUG 10464 /* 10465 * Assert that we are not freeing a block which has an outstanding 10466 * allocation dependency. 10467 */ 10468 fs = VFSTOUFS(mp)->um_fs; 10469 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10470 end = blkno + frags; 10471 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10472 /* 10473 * Don't match against blocks that will be freed when the 10474 * background write is done. 
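 * The test below compares the frags being freed, [blkno, end), with
 * each journaled allocation [jstart, jend). For example, freeing
 * frags 100-103 (end == 104) while a jnewblk still covers 102-109
 * (jend == 110) matches the second clause (104 > 102 && 104 <= 110)
 * and panics, since frags 102 and 103 are still allocation-pending.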
10475 */ 10476 if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) == 10477 (COMPLETE | DEPCOMPLETE)) 10478 continue; 10479 jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags; 10480 jend = jnewblk->jn_blkno + jnewblk->jn_frags; 10481 if ((blkno >= jstart && blkno < jend) || 10482 (end > jstart && end <= jend)) { 10483 printf("state 0x%X %jd - %d %d dep %p\n", 10484 jnewblk->jn_state, jnewblk->jn_blkno, 10485 jnewblk->jn_oldfrags, jnewblk->jn_frags, 10486 jnewblk->jn_dep); 10487 panic("softdep_setup_blkfree: " 10488 "%jd-%jd(%d) overlaps with %jd-%jd", 10489 blkno, end, frags, jstart, jend); 10490 } 10491 } 10492 #endif 10493 FREE_LOCK(&lk); 10494 } 10495 10496 /* 10497 * Revert a block allocation when the journal record that describes it 10498 * is not yet written. 10499 */ 10500 int 10501 jnewblk_rollback(jnewblk, fs, cgp, blksfree) 10502 struct jnewblk *jnewblk; 10503 struct fs *fs; 10504 struct cg *cgp; 10505 uint8_t *blksfree; 10506 { 10507 ufs1_daddr_t fragno; 10508 long cgbno, bbase; 10509 int frags, blk; 10510 int i; 10511 10512 frags = 0; 10513 cgbno = dtogd(fs, jnewblk->jn_blkno); 10514 /* 10515 * We have to test which frags need to be rolled back. We may 10516 * be operating on a stale copy when doing background writes. 10517 */ 10518 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) 10519 if (isclr(blksfree, cgbno + i)) 10520 frags++; 10521 if (frags == 0) 10522 return (0); 10523 /* 10524 * This is mostly ffs_blkfree() sans some validation and 10525 * superblock updates. 10526 */ 10527 if (frags == fs->fs_frag) { 10528 fragno = fragstoblks(fs, cgbno); 10529 ffs_setblock(fs, blksfree, fragno); 10530 ffs_clusteracct(fs, cgp, fragno, 1); 10531 cgp->cg_cs.cs_nbfree++; 10532 } else { 10533 cgbno += jnewblk->jn_oldfrags; 10534 bbase = cgbno - fragnum(fs, cgbno); 10535 /* Decrement the old frags. */ 10536 blk = blkmap(fs, blksfree, bbase); 10537 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 10538 /* Deallocate the fragment */ 10539 for (i = 0; i < frags; i++) 10540 setbit(blksfree, cgbno + i); 10541 cgp->cg_cs.cs_nffree += frags; 10542 /* Add back in counts associated with the new frags */ 10543 blk = blkmap(fs, blksfree, bbase); 10544 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 10545 /* If a complete block has been reassembled, account for it. */ 10546 fragno = fragstoblks(fs, bbase); 10547 if (ffs_isblock(fs, blksfree, fragno)) { 10548 cgp->cg_cs.cs_nffree -= fs->fs_frag; 10549 ffs_clusteracct(fs, cgp, fragno, 1); 10550 cgp->cg_cs.cs_nbfree++; 10551 } 10552 } 10553 stat_jnewblk++; 10554 jnewblk->jn_state &= ~ATTACHED; 10555 jnewblk->jn_state |= UNDONE; 10556 10557 return (frags); 10558 } 10559 10560 static void 10561 initiate_write_bmsafemap(bmsafemap, bp) 10562 struct bmsafemap *bmsafemap; 10563 struct buf *bp; /* The cg block. */ 10564 { 10565 struct jaddref *jaddref; 10566 struct jnewblk *jnewblk; 10567 uint8_t *inosused; 10568 uint8_t *blksfree; 10569 struct cg *cgp; 10570 struct fs *fs; 10571 ino_t ino; 10572 10573 if (bmsafemap->sm_state & IOSTARTED) 10574 return; 10575 bmsafemap->sm_state |= IOSTARTED; 10576 /* 10577 * Clear any inode allocations which are pending journal writes. 
10578 */ 10579 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) { 10580 cgp = (struct cg *)bp->b_data; 10581 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10582 inosused = cg_inosused(cgp); 10583 LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) { 10584 ino = jaddref->ja_ino % fs->fs_ipg; 10585 if (isset(inosused, ino)) { 10586 if ((jaddref->ja_mode & IFMT) == IFDIR) 10587 cgp->cg_cs.cs_ndir--; 10588 cgp->cg_cs.cs_nifree++; 10589 clrbit(inosused, ino); 10590 jaddref->ja_state &= ~ATTACHED; 10591 jaddref->ja_state |= UNDONE; 10592 stat_jaddref++; 10593 } else 10594 panic("initiate_write_bmsafemap: inode %ju " 10595 "marked free", (uintmax_t)jaddref->ja_ino); 10596 } 10597 } 10598 /* 10599 * Clear any block allocations which are pending journal writes. 10600 */ 10601 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 10602 cgp = (struct cg *)bp->b_data; 10603 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10604 blksfree = cg_blksfree(cgp); 10605 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10606 if (jnewblk_rollback(jnewblk, fs, cgp, blksfree)) 10607 continue; 10608 panic("initiate_write_bmsafemap: block %jd " 10609 "marked free", jnewblk->jn_blkno); 10610 } 10611 } 10612 /* 10613 * Move allocation lists to the written lists so they can be 10614 * cleared once the block write is complete. 10615 */ 10616 LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr, 10617 inodedep, id_deps); 10618 LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, 10619 newblk, nb_deps); 10620 LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, 10621 wk_list); 10622 } 10623 10624 /* 10625 * This routine is called during the completion interrupt 10626 * service routine for a disk write (from the procedure called 10627 * by the device driver to inform the filesystem caches of 10628 * a request completion). It should be called early in this 10629 * procedure, before the block is made available to other 10630 * processes or other routines are called. 10631 * 10632 */ 10633 static void 10634 softdep_disk_write_complete(bp) 10635 struct buf *bp; /* describes the completed disk write */ 10636 { 10637 struct worklist *wk; 10638 struct worklist *owk; 10639 struct workhead reattach; 10640 struct freeblks *freeblks; 10641 struct buf *sbp; 10642 10643 /* 10644 * If an error occurred while doing the write, then the data 10645 * has not hit the disk and the dependencies cannot be unrolled. 10646 */ 10647 if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) 10648 return; 10649 LIST_INIT(&reattach); 10650 /* 10651 * This lock must not be released anywhere in this code segment. 

/*
 * This routine is called during the completion interrupt
 * service routine for a disk write (from the procedure called
 * by the device driver to inform the filesystem caches of
 * a request completion).  It should be called early in this
 * procedure, before the block is made available to other
 * processes or other routines are called.
 */
static void
softdep_disk_write_complete(bp)
	struct buf *bp;		/* describes the completed disk write */
{
	struct worklist *wk;
	struct worklist *owk;
	struct workhead reattach;
	struct freeblks *freeblks;
	struct buf *sbp;

	/*
	 * If an error occurred while doing the write, then the data
	 * has not hit the disk and the dependencies cannot be unrolled.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
		return;
	LIST_INIT(&reattach);
	/*
	 * This lock must not be released anywhere in this code segment.
	 */
	sbp = NULL;
	owk = NULL;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
		WORKLIST_REMOVE(wk);
		dep_write[wk->wk_type]++;
		if (wk == owk)
			panic("duplicate worklist: %p\n", wk);
		owk = wk;
		switch (wk->wk_type) {

		case D_PAGEDEP:
			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_INODEDEP:
			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_BMSAFEMAP:
			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_MKDIR:
			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
			continue;

		case D_ALLOCDIRECT:
			wk->wk_state |= COMPLETE;
			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
			continue;

		case D_ALLOCINDIR:
			wk->wk_state |= COMPLETE;
			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
			continue;

		case D_INDIRDEP:
			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEBLKS:
			wk->wk_state |= COMPLETE;
			freeblks = WK_FREEBLKS(wk);
			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
			    LIST_EMPTY(&freeblks->fb_jblkdephd))
				add_to_worklist(wk, WK_NODELAY);
			continue;

		case D_FREEWORK:
			handle_written_freework(WK_FREEWORK(wk));
			break;

		case D_JSEGDEP:
			free_jsegdep(WK_JSEGDEP(wk));
			continue;

		case D_JSEG:
			handle_written_jseg(WK_JSEG(wk), bp);
			continue;

		case D_SBDEP:
			if (handle_written_sbdep(WK_SBDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEDEP:
			free_freedep(WK_FREEDEP(wk));
			continue;

		default:
			panic("handle_disk_write_complete: Unknown type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	/*
	 * Reattach any requests that must be redone.
	 */
	while ((wk = LIST_FIRST(&reattach)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&bp->b_dep, wk);
	}
	FREE_LOCK(&lk);
	if (sbp)
		brelse(sbp);
}
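
/*
 * Editor's sketch (assumed usage, simplified): dependencies reach the
 * dispatch loop above by being attached to the buffer's b_dep list when
 * they are set up, e.g. for an allocdirect:
 *
 *	ACQUIRE_LOCK(&lk);
 *	WORKLIST_INSERT(&bp->b_dep, &adp->ad_block.nb_list);
 *	FREE_LOCK(&lk);
 *
 * When the driver signals completion, softdep_disk_write_complete()
 * removes each worklist item, switches on wk_type, and either retires
 * the dependency or places it on the local "reattach" list when another
 * write of the same buffer will be required to finish it.
 */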

/*
 * Called from within softdep_disk_write_complete above.  Note that
 * this routine is always called from interrupt level with further
 * splbio interrupts blocked.
 */
static void
handle_allocdirect_partdone(adp, wkhd)
	struct allocdirect *adp;	/* the completed allocdirect */
	struct workhead *wkhd;		/* Work to do when inode is written. */
{
	struct allocdirectlst *listhead;
	struct allocdirect *listadp;
	struct inodedep *inodedep;
	long bsize;

	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written.  Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem.  Thus, we cannot free any
	 * allocdirects after one whose ad_oldblkno claims a fragment as
	 * these blocks must be rolled back to zero before writing the inode.
	 * We check the currently active set of allocdirects in id_inoupdt
	 * or id_extupdt as appropriate.
	 */
	inodedep = adp->ad_inodedep;
	bsize = inodedep->id_fs->fs_bsize;
	if (adp->ad_state & EXTDATA)
		listhead = &inodedep->id_extupdt;
	else
		listhead = &inodedep->id_inoupdt;
	TAILQ_FOREACH(listadp, listhead, ad_next) {
		/* found our block */
		if (listadp == adp)
			break;
		/* continue if ad_oldblkno is not a fragment */
		if (listadp->ad_oldsize == 0 ||
		    listadp->ad_oldsize == bsize)
			continue;
		/* hit a fragment */
		return;
	}
	/*
	 * If we have reached the end of the current list without
	 * finding the just finished dependency, then it must be
	 * on the future dependency list.  Future dependencies cannot
	 * be freed until they are moved to the current list.
	 */
	if (listadp == NULL) {
#ifdef DEBUG
		if (adp->ad_state & EXTDATA)
			listhead = &inodedep->id_newextupdt;
		else
			listhead = &inodedep->id_newinoupdt;
		TAILQ_FOREACH(listadp, listhead, ad_next)
			/* found our block */
			if (listadp == adp)
				break;
		if (listadp == NULL)
			panic("handle_allocdirect_partdone: lost dep");
#endif /* DEBUG */
		return;
	}
	/*
	 * If we have found the just finished dependency, then queue
	 * it along with anything that follows it that is complete.
	 * Since the pointer has not yet been written in the inode
	 * as the dependency prevents it, place the allocdirect on the
	 * bufwait list where it will be freed once the pointer is
	 * valid.
	 */
	if (wkhd == NULL)
		wkhd = &inodedep->id_bufwait;
	for (; adp; adp = listadp) {
		listadp = TAILQ_NEXT(adp, ad_next);
		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
			return;
		TAILQ_REMOVE(listhead, adp, ad_next);
		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
	}
}

/*
 * Called from within softdep_disk_write_complete above.  This routine
 * completes successfully written allocindirs.
 */
static void
handle_allocindir_partdone(aip)
	struct allocindir *aip;		/* the completed allocindir */
{
	struct indirdep *indirdep;

	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	indirdep = aip->ai_indirdep;
	LIST_REMOVE(aip, ai_next);
	/*
	 * Don't set a pointer while the buffer is undergoing IO or while
	 * we have active truncations.
	 */
	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
		return;
	}
	if (indirdep->ir_state & UFS1FMT)
		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
		    aip->ai_newblkno;
	else
		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
		    aip->ai_newblkno;
	/*
	 * Await the pointer write before freeing the allocindir.
	 */
	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
}

/*
 * Release segments held on a jwork list.
 */
static void
handle_jwork(wkhd)
	struct workhead *wkhd;
{
	struct worklist *wk;

	while ((wk = LIST_FIRST(wkhd)) != NULL) {
		WORKLIST_REMOVE(wk);
		switch (wk->wk_type) {
		case D_JSEGDEP:
			free_jsegdep(WK_JSEGDEP(wk));
			continue;
		case D_FREEDEP:
			free_freedep(WK_FREEDEP(wk));
			continue;
		case D_FREEFRAG:
			rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
			WORKITEM_FREE(wk, D_FREEFRAG);
			continue;
		case D_FREEWORK:
			handle_written_freework(WK_FREEWORK(wk));
			continue;
		default:
			panic("handle_jwork: Unknown type %s\n",
			    TYPENAME(wk->wk_type));
		}
	}
}

/*
 * Handle the bufwait list on an inode when it is safe to release items
 * held there.  This normally happens after an inode block is written but
 * may be delayed and handled later if there are pending journal items that
 * are not yet safe to be released.
 */
static struct freefile *
handle_bufwait(inodedep, refhd)
	struct inodedep *inodedep;
	struct workhead *refhd;
{
	struct jaddref *jaddref;
	struct freefile *freefile;
	struct worklist *wk;

	freefile = NULL;
	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
		WORKLIST_REMOVE(wk);
		switch (wk->wk_type) {
		case D_FREEFILE:
			/*
			 * We defer adding freefile to the worklist
			 * until all other additions have been made to
			 * ensure that it will be done after all the
			 * old blocks have been freed.
			 */
			if (freefile != NULL)
				panic("handle_bufwait: freefile");
			freefile = WK_FREEFILE(wk);
			continue;

		case D_MKDIR:
			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
			continue;

		case D_DIRADD:
			diradd_inode_written(WK_DIRADD(wk), inodedep);
			continue;

		case D_FREEFRAG:
			wk->wk_state |= COMPLETE;
			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
				add_to_worklist(wk, 0);
			continue;

		case D_DIRREM:
			wk->wk_state |= COMPLETE;
			add_to_worklist(wk, 0);
			continue;

		case D_ALLOCDIRECT:
		case D_ALLOCINDIR:
			free_newblk(WK_NEWBLK(wk));
			continue;

		case D_JNEWBLK:
			wk->wk_state |= COMPLETE;
			free_jnewblk(WK_JNEWBLK(wk));
			continue;

		/*
		 * Save freed journal segments and add references on
		 * the supplied list which will delay their release
		 * until the cg bitmap is cleared on disk.
		 */
		case D_JSEGDEP:
			if (refhd == NULL)
				free_jsegdep(WK_JSEGDEP(wk));
			else
				WORKLIST_INSERT(refhd, wk);
			continue;

		case D_JADDREF:
			jaddref = WK_JADDREF(wk);
			TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
			    if_deps);
			/*
			 * Transfer any jaddrefs to the list to be freed with
			 * the bitmap if we're handling a removed file.
			 */
			if (refhd == NULL) {
				wk->wk_state |= COMPLETE;
				free_jaddref(jaddref);
			} else
				WORKLIST_INSERT(refhd, wk);
			continue;

		default:
			panic("handle_bufwait: Unknown type %p(%s)",
			    wk, TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	return (freefile);
}
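
/*
 * Editor's note on the refhd parameter above (hedged sketch): when a
 * removed file is being torn down, a caller can pass a private work
 * list so that D_JSEGDEP and D_JADDREF items are parked rather than
 * freed immediately, e.g.:
 *
 *	LIST_INIT(&refhd);
 *	freefile = handle_bufwait(inodedep, &refhd);
 *	... the parked items are released only after the cg bitmap
 *	    block that clears the allocation has been written ...
 *
 * This keeps the journal segments referenced until the on-disk bitmap
 * no longer depends on them.
 */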

/*
 * Called from within softdep_disk_write_complete above to restore
 * in-memory inode block contents to their most up-to-date state.  Note
 * that this routine is always called from interrupt level with further
 * splbio interrupts blocked.
 */
static int
handle_written_inodeblock(inodedep, bp)
	struct inodedep *inodedep;
	struct buf *bp;		/* buffer containing the inode block */
{
	struct freefile *freefile;
	struct allocdirect *adp, *nextadp;
	struct ufs1_dinode *dp1 = NULL;
	struct ufs2_dinode *dp2 = NULL;
	struct workhead wkhd;
	int hadchanges, fstype;
	ino_t freelink;

	LIST_INIT(&wkhd);
	hadchanges = 0;
	freefile = NULL;
	if ((inodedep->id_state & IOSTARTED) == 0)
		panic("handle_written_inodeblock: not started");
	inodedep->id_state &= ~IOSTARTED;
	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
		fstype = UFS1;
		dp1 = (struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
		freelink = dp1->di_freelink;
	} else {
		fstype = UFS2;
		dp2 = (struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
		freelink = dp2->di_freelink;
	}
	/*
	 * Leave this inodeblock dirty until it's in the list.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) {
		struct inodedep *inon;

		inon = TAILQ_NEXT(inodedep, id_unlinked);
		if ((inon == NULL && freelink == 0) ||
		    (inon && inon->id_ino == freelink)) {
			if (inon)
				inon->id_state |= UNLINKPREV;
			inodedep->id_state |= UNLINKNEXT;
		}
		hadchanges = 1;
	}
	/*
	 * If we had to rollback the inode allocation because of
	 * bitmaps being incomplete, then simply restore it.
	 * Keep the block dirty so that it will not be reclaimed until
	 * all associated dependencies have been cleared and the
	 * corresponding updates written to disk.
	 */
	if (inodedep->id_savedino1 != NULL) {
		hadchanges = 1;
		if (fstype == UFS1)
			*dp1 = *inodedep->id_savedino1;
		else
			*dp2 = *inodedep->id_savedino2;
		free(inodedep->id_savedino1, M_SAVEDINO);
		inodedep->id_savedino1 = NULL;
		if ((bp->b_flags & B_DELWRI) == 0)
			stat_inode_bitmap++;
		bdirty(bp);
		/*
		 * If the inode is clear here and GOINGAWAY it will never
		 * be written.  Process the bufwait and clear any pending
		 * work which may include the freefile.
		 */
		if (inodedep->id_state & GOINGAWAY)
			goto bufwait;
		return (1);
	}
	inodedep->id_state |= COMPLETE;
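	/*
	 * Editor's example (hedged, illustrative numbers): if an
	 * allocdirect recorded ad_oldblkno = 100 and ad_newblkno = 5000
	 * for di_db[3], the initiation of this inode-block write rolled
	 * di_db[3] back to 100 so that a crash would expose only the
	 * safe, older block.  The loops below re-install 5000, first
	 * verifying that the rolled-back value still matches
	 * ad_oldblkno.
	 */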
	/*
	 * Roll forward anything that had to be rolled back before
	 * the inode could be updated.
	 */
	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
		nextadp = TAILQ_NEXT(adp, ad_next);
		if (adp->ad_state & ATTACHED)
			panic("handle_written_inodeblock: new entry");
		if (fstype == UFS1) {
			if (adp->ad_offset < NDADDR) {
				if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
					panic("%s %s #%jd mismatch %d != %jd",
					    "handle_written_inodeblock:",
					    "direct pointer",
					    (intmax_t)adp->ad_offset,
					    dp1->di_db[adp->ad_offset],
					    (intmax_t)adp->ad_oldblkno);
				dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
			} else {
				if (dp1->di_ib[adp->ad_offset - NDADDR] != 0)
					panic("%s: %s #%jd allocated as %d",
					    "handle_written_inodeblock",
					    "indirect pointer",
					    (intmax_t)adp->ad_offset - NDADDR,
					    dp1->di_ib[adp->ad_offset - NDADDR]);
				dp1->di_ib[adp->ad_offset - NDADDR] =
				    adp->ad_newblkno;
			}
		} else {
			if (adp->ad_offset < NDADDR) {
				if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
					panic("%s: %s #%jd %s %jd != %jd",
					    "handle_written_inodeblock",
					    "direct pointer",
					    (intmax_t)adp->ad_offset, "mismatch",
					    (intmax_t)dp2->di_db[adp->ad_offset],
					    (intmax_t)adp->ad_oldblkno);
				dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
			} else {
				if (dp2->di_ib[adp->ad_offset - NDADDR] != 0)
					panic("%s: %s #%jd allocated as %jd",
					    "handle_written_inodeblock",
					    "indirect pointer",
					    (intmax_t)adp->ad_offset - NDADDR,
					    (intmax_t)
					    dp2->di_ib[adp->ad_offset - NDADDR]);
				dp2->di_ib[adp->ad_offset - NDADDR] =
				    adp->ad_newblkno;
			}
		}
		adp->ad_state &= ~UNDONE;
		adp->ad_state |= ATTACHED;
		hadchanges = 1;
	}
	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
		nextadp = TAILQ_NEXT(adp, ad_next);
		if (adp->ad_state & ATTACHED)
			panic("handle_written_inodeblock: new entry");
		if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
			panic("%s: direct pointers #%jd %s %jd != %jd",
			    "handle_written_inodeblock",
			    (intmax_t)adp->ad_offset, "mismatch",
			    (intmax_t)dp2->di_extb[adp->ad_offset],
			    (intmax_t)adp->ad_oldblkno);
		dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
		adp->ad_state &= ~UNDONE;
		adp->ad_state |= ATTACHED;
		hadchanges = 1;
	}
	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
		stat_direct_blk_ptrs++;
	/*
	 * Reset the file size to its most up-to-date value.
	 */
	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
		panic("handle_written_inodeblock: bad size");
	if (inodedep->id_savednlink > LINK_MAX)
		panic("handle_written_inodeblock: Invalid link count "
		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
	if (fstype == UFS1) {
		if (dp1->di_nlink != inodedep->id_savednlink) {
			dp1->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp1->di_size != inodedep->id_savedsize) {
			dp1->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
	} else {
		if (dp2->di_nlink != inodedep->id_savednlink) {
			dp2->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp2->di_size != inodedep->id_savedsize) {
			dp2->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
		if (dp2->di_extsize != inodedep->id_savedextsize) {
			dp2->di_extsize = inodedep->id_savedextsize;
			hadchanges = 1;
		}
	}
	inodedep->id_savedsize = -1;
	inodedep->id_savedextsize = -1;
	inodedep->id_savednlink = -1;
	/*
	 * If there were any rollbacks in the inode block, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (hadchanges)
		bdirty(bp);
bufwait:
	/*
	 * Process any allocdirects that completed during the update.
	 */
	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	/*
	 * Process deallocations that were held pending until the
	 * inode had been written to disk.  Freeing of the inode
	 * is delayed until after all blocks have been freed to
	 * avoid creation of new <vfsid, inum, lbn> triples
	 * before the old ones have been deleted.  Completely
	 * unlinked inodes are not processed until the unlinked
	 * inode list is written or the last reference is removed.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
		freefile = handle_bufwait(inodedep, NULL);
		if (freefile && !LIST_EMPTY(&wkhd)) {
			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
			freefile = NULL;
		}
	}
	/*
	 * Move rolled forward dependency completions to the bufwait list
	 * now that those that were already written have been processed.
	 */
	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
		panic("handle_written_inodeblock: bufwait but no changes");
	jwork_move(&inodedep->id_bufwait, &wkhd);

	if (freefile != NULL) {
		/*
		 * If the inode is goingaway it was never written.  Fake up
		 * the state here so free_inodedep() can succeed.
		 */
		if (inodedep->id_state & GOINGAWAY)
			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
		if (free_inodedep(inodedep) == 0)
			panic("handle_written_inodeblock: live inodedep %p",
			    inodedep);
		add_to_worklist(&freefile->fx_list, 0);
		return (0);
	}

	/*
	 * If no outstanding dependencies, free it.
	 */
	if (free_inodedep(inodedep) ||
	    (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
	     TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
	     TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
	     LIST_FIRST(&inodedep->id_bufwait) == 0))
		return (0);
	return (hadchanges);
}

static int
handle_written_indirdep(indirdep, bp, bpp)
	struct indirdep *indirdep;
	struct buf *bp;
	struct buf **bpp;
{
	struct allocindir *aip;
	struct buf *sbp;
	int chgs;

	if (indirdep->ir_state & GOINGAWAY)
		panic("handle_written_indirdep: indirdep gone");
	if ((indirdep->ir_state & IOSTARTED) == 0)
		panic("handle_written_indirdep: IO not started");
	chgs = 0;
	/*
	 * If there were rollbacks revert them here.
	 */
	if (indirdep->ir_saveddata) {
		bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
		if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
			free(indirdep->ir_saveddata, M_INDIRDEP);
			indirdep->ir_saveddata = NULL;
		}
		chgs = 1;
	}
	indirdep->ir_state &= ~(UNDONE | IOSTARTED);
	indirdep->ir_state |= ATTACHED;
	/*
	 * Move allocindirs with written pointers to the completehd if
	 * the indirdep's pointer is not yet written.  Otherwise
	 * free them here.
	 */
	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) {
		LIST_REMOVE(aip, ai_next);
		if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
			LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
			    ai_next);
			newblk_freefrag(&aip->ai_block);
			continue;
		}
		free_newblk(&aip->ai_block);
	}
	/*
	 * Move allocindirs that have finished dependency processing from
	 * the done list to the write list after updating the pointers.
	 */
	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
		while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
			handle_allocindir_partdone(aip);
			if (aip == LIST_FIRST(&indirdep->ir_donehd))
				panic("disk_write_complete: not gone");
			chgs = 1;
		}
	}
	/*
	 * Preserve the indirdep if there were any changes or if it is not
	 * yet valid on disk.
	 */
	if (chgs) {
		stat_indir_blk_ptrs++;
		bdirty(bp);
		return (1);
	}
	/*
	 * If there were no changes we can discard the savedbp and detach
	 * ourselves from the buf.  We are only carrying completed pointers
	 * in this case.
	 */
	sbp = indirdep->ir_savebp;
	sbp->b_flags |= B_INVAL | B_NOCACHE;
	indirdep->ir_savebp = NULL;
	indirdep->ir_bp = NULL;
	if (*bpp != NULL)
		panic("handle_written_indirdep: bp already exists.");
	*bpp = sbp;
	/*
	 * The indirdep may not be freed until its parent points at it.
	 */
	if (indirdep->ir_state & DEPCOMPLETE)
		free_indirdep(indirdep);

	return (0);
}

/*
 * Process a diradd entry after its dependent inode has been written.
 * This routine must be called with splbio interrupts blocked.
 */
static void
diradd_inode_written(dap, inodedep)
	struct diradd *dap;
	struct inodedep *inodedep;
{

	dap->da_state |= COMPLETE;
	complete_diradd(dap);
	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
}
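
/*
 * Editor's note: the background-write path below operates on a shadow
 * copy of the cg buffer (BX_BKGRDMARKER).  A hedged sketch of the
 * distinction, based only on the code visible here:
 *
 *	foreground write:  rollbacks are applied at I/O start time in
 *			   initiate_write_bmsafemap();
 *	background write:  rollbacks are applied immediately in
 *			   bmsafemap_backgroundwrite(), since no other
 *			   consumer will look at the shadowed data.
 */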

/*
 * Returns true if the bmsafemap will have rollbacks when written.  Must
 * only be called with lk and the buf lock on the cg held.
 */
static int
bmsafemap_backgroundwrite(bmsafemap, bp)
	struct bmsafemap *bmsafemap;
	struct buf *bp;
{
	int dirty;

	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
	/*
	 * If we're initiating a background write we need to process the
	 * rollbacks as they exist now, not as they exist when IO starts.
	 * No other consumers will look at the contents of the shadowed
	 * buf so this is safe to do here.
	 */
	if (bp->b_xflags & BX_BKGRDMARKER)
		initiate_write_bmsafemap(bmsafemap, bp);

	return (dirty);
}

/*
 * Re-apply an allocation when a cg write is complete.
 */
static int
jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
	struct jnewblk *jnewblk;
	struct fs *fs;
	struct cg *cgp;
	uint8_t *blksfree;
{
	ufs1_daddr_t fragno;
	ufs2_daddr_t blkno;
	long cgbno, bbase;
	int frags, blk;
	int i;

	frags = 0;
	cgbno = dtogd(fs, jnewblk->jn_blkno);
	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
		if (isclr(blksfree, cgbno + i))
			panic("jnewblk_rollforward: re-allocated fragment");
		frags++;
	}
	if (frags == fs->fs_frag) {
		blkno = fragstoblks(fs, cgbno);
		ffs_clrblock(fs, blksfree, (long)blkno);
		ffs_clusteracct(fs, cgp, blkno, -1);
		cgp->cg_cs.cs_nbfree--;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		cgbno += jnewblk->jn_oldfrags;
		/* If a complete block had been reassembled, account for it. */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, fragno)) {
			cgp->cg_cs.cs_nffree += fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, -1);
			cgp->cg_cs.cs_nbfree--;
		}
		/* Decrement the old frags. */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/* Allocate the fragment */
		for (i = 0; i < frags; i++)
			clrbit(blksfree, cgbno + i);
		cgp->cg_cs.cs_nffree -= frags;
		/* Add back in counts associated with the new frags */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
	}
	return (frags);
}

/*
 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
 * changes if it's not a background write.  Set all written dependencies
 * to DEPCOMPLETE and free the structure if possible.
 */
static int
handle_written_bmsafemap(bmsafemap, bp)
	struct bmsafemap *bmsafemap;
	struct buf *bp;
{
	struct newblk *newblk;
	struct inodedep *inodedep;
	struct jaddref *jaddref, *jatmp;
	struct jnewblk *jnewblk, *jntmp;
	struct ufsmount *ump;
	uint8_t *inosused;
	uint8_t *blksfree;
	struct cg *cgp;
	struct fs *fs;
	ino_t ino;
	int chgs;

	if ((bmsafemap->sm_state & IOSTARTED) == 0)
		panic("handle_written_bmsafemap: Not started\n");
	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
	chgs = 0;
	bmsafemap->sm_state &= ~IOSTARTED;
	/*
	 * Release journal work that was waiting on the write.
	 */
	handle_jwork(&bmsafemap->sm_freewr);

	/*
	 * Restore unwritten inode allocation pending jaddref writes.
	 */
	if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
		cgp = (struct cg *)bp->b_data;
		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
		inosused = cg_inosused(cgp);
		LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
		    ja_bmdeps, jatmp) {
			if ((jaddref->ja_state & UNDONE) == 0)
				continue;
			ino = jaddref->ja_ino % fs->fs_ipg;
			if (isset(inosused, ino))
				panic("handle_written_bmsafemap: "
				    "re-allocated inode");
			if ((bp->b_xflags & BX_BKGRDMARKER) == 0) {
				if ((jaddref->ja_mode & IFMT) == IFDIR)
					cgp->cg_cs.cs_ndir++;
				cgp->cg_cs.cs_nifree--;
				setbit(inosused, ino);
				chgs = 1;
			}
			jaddref->ja_state &= ~UNDONE;
			jaddref->ja_state |= ATTACHED;
			free_jaddref(jaddref);
		}
	}
	/*
	 * Restore any block allocations which are pending journal writes.
	 */
	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
		cgp = (struct cg *)bp->b_data;
		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
		blksfree = cg_blksfree(cgp);
		LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
		    jntmp) {
			if ((jnewblk->jn_state & UNDONE) == 0)
				continue;
			if ((bp->b_xflags & BX_BKGRDMARKER) == 0 &&
			    jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
				chgs = 1;
			jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
			jnewblk->jn_state |= ATTACHED;
			free_jnewblk(jnewblk);
		}
	}
	while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
		newblk->nb_state |= DEPCOMPLETE;
		newblk->nb_state &= ~ONDEPLIST;
		newblk->nb_bmsafemap = NULL;
		LIST_REMOVE(newblk, nb_deps);
		if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
			handle_allocdirect_partdone(
			    WK_ALLOCDIRECT(&newblk->nb_list), NULL);
		else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
			handle_allocindir_partdone(
			    WK_ALLOCINDIR(&newblk->nb_list));
		else if (newblk->nb_list.wk_type != D_NEWBLK)
			panic("handle_written_bmsafemap: Unexpected type: %s",
			    TYPENAME(newblk->nb_list.wk_type));
	}
	while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
		inodedep->id_state |= DEPCOMPLETE;
		inodedep->id_state &= ~ONDEPLIST;
		LIST_REMOVE(inodedep, id_deps);
		inodedep->id_bmsafemap = NULL;
	}
	LIST_REMOVE(bmsafemap, sm_next);
	if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
	    LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
	    LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
	    LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
	    LIST_EMPTY(&bmsafemap->sm_freehd)) {
		LIST_REMOVE(bmsafemap, sm_hash);
		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
		return (0);
	}
	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
	bdirty(bp);
	return (1);
}

/*
 * Try to free a mkdir dependency.
 */
static void
complete_mkdir(mkdir)
	struct mkdir *mkdir;
{
	struct diradd *dap;

	if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	LIST_REMOVE(mkdir, md_mkdirs);
	dap = mkdir->md_diradd;
	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
		dap->da_state |= DEPCOMPLETE;
		complete_diradd(dap);
	}
	WORKITEM_FREE(mkdir, D_MKDIR);
}
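
/*
 * Editor's sketch of the mkdir dependency pair (simplified): a new
 * directory D created in parent P carries two mkdir items,
 *
 *	MKDIR_BODY:	D's first block, holding "." and "..", must be
 *			written;
 *	MKDIR_PARENT:	P's inode, with its incremented link count,
 *			must be written;
 *
 * handle_written_mkdir() below clears one of the two as each write
 * completes, and complete_mkdir() lets the diradd proceed only once
 * both have cleared.
 */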

/*
 * Handle the completion of a mkdir dependency.
 */
static void
handle_written_mkdir(mkdir, type)
	struct mkdir *mkdir;
	int type;
{

	if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
		panic("handle_written_mkdir: bad type");
	mkdir->md_state |= COMPLETE;
	complete_mkdir(mkdir);
}

static int
free_pagedep(pagedep)
	struct pagedep *pagedep;
{
	int i;

	if (pagedep->pd_state & NEWBLOCK)
		return (0);
	if (!LIST_EMPTY(&pagedep->pd_dirremhd))
		return (0);
	for (i = 0; i < DAHASHSZ; i++)
		if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
			return (0);
	if (!LIST_EMPTY(&pagedep->pd_pendinghd))
		return (0);
	if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
		return (0);
	if (pagedep->pd_state & ONWORKLIST)
		WORKLIST_REMOVE(&pagedep->pd_list);
	LIST_REMOVE(pagedep, pd_hash);
	WORKITEM_FREE(pagedep, D_PAGEDEP);

	return (1);
}

/*
 * Called from within softdep_disk_write_complete above.
 * A write operation was just completed.  Removed inodes can
 * now be freed and associated block pointers may be committed.
 * Note that this routine is always called from interrupt level
 * with further splbio interrupts blocked.
 */
static int
handle_written_filepage(pagedep, bp)
	struct pagedep *pagedep;
	struct buf *bp;		/* buffer containing the written page */
{
	struct dirrem *dirrem;
	struct diradd *dap, *nextdap;
	struct direct *ep;
	int i, chgs;

	if ((pagedep->pd_state & IOSTARTED) == 0)
		panic("handle_written_filepage: not started");
	pagedep->pd_state &= ~IOSTARTED;
	/*
	 * Process any directory removals that have been committed.
	 */
	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
		LIST_REMOVE(dirrem, dm_next);
		dirrem->dm_state |= COMPLETE;
		dirrem->dm_dirinum = pagedep->pd_ino;
		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
		    ("handle_written_filepage: Journal entries not written."));
		add_to_worklist(&dirrem->dm_list, 0);
	}
	/*
	 * Free any directory additions that have been committed.
	 * If it is a newly allocated block, we have to wait until
	 * the on-disk directory inode claims the new block.
	 */
	if ((pagedep->pd_state & NEWBLOCK) == 0)
		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
			free_diradd(dap, NULL);
	/*
	 * Uncommitted directory entries must be restored.
	 */
	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
		     dap = nextdap) {
			nextdap = LIST_NEXT(dap, da_pdlist);
			if (dap->da_state & ATTACHED)
				panic("handle_written_filepage: attached");
			ep = (struct direct *)
			    ((char *)bp->b_data + dap->da_offset);
			ep->d_ino = dap->da_newinum;
			dap->da_state &= ~UNDONE;
			dap->da_state |= ATTACHED;
			chgs = 1;
			/*
			 * If the inode referenced by the directory has
			 * been written out, then the dependency can be
			 * moved to the pending list.
			 */
			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
				LIST_REMOVE(dap, da_pdlist);
				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
				    da_pdlist);
			}
		}
	}
	/*
	 * If there were any rollbacks in the directory, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (chgs) {
		if ((bp->b_flags & B_DELWRI) == 0)
			stat_dir_entry++;
		bdirty(bp);
		return (1);
	}
	/*
	 * If we are not waiting for a new directory block to be
	 * claimed by its inode, then the pagedep will be freed.
	 * Otherwise it will remain to track any new entries on
	 * the page in case they are fsync'ed.
	 */
	free_pagedep(pagedep);
	return (0);
}

/*
 * Writing back in-core inode structures.
 *
 * The filesystem only accesses an inode's contents when it occupies an
 * "in-core" inode structure.  These "in-core" structures are separate from
 * the page frames used to cache inode blocks.  Only the latter are
 * transferred to/from the disk.  So, when the updated contents of the
 * "in-core" inode structure are copied to the corresponding in-memory inode
 * block, the dependencies are also transferred.  The following procedure is
 * called when copying a dirty "in-core" inode to a cached inode block.
 */

/*
 * Called when an inode is loaded from disk.  If the effective link count
 * differed from the actual link count when it was last flushed, then we
 * need to ensure that the correct effective link count is put back.
 */
void
softdep_load_inodeblock(ip)
	struct inode *ip;	/* the "in_core" copy of the inode */
{
	struct inodedep *inodedep;

	/*
	 * Check for alternate nlink count.
	 */
	ip->i_effnlink = ip->i_nlink;
	ACQUIRE_LOCK(&lk);
	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
	    &inodedep) == 0) {
		FREE_LOCK(&lk);
		return;
	}
	ip->i_effnlink -= inodedep->id_nlinkdelta;
	FREE_LOCK(&lk);
}

/*
 * This routine is called just before the "in-core" inode
 * information is to be copied to the in-memory inode block.
 * Recall that an inode block contains several inodes.  If
 * the force flag is set, then the dependencies will be
 * cleared so that the update can always be made.  Note that
 * the buffer is locked when this routine is called, so we
 * will never be in the middle of writing the inode block
 * to disk.
 */
void
softdep_update_inodeblock(ip, bp, waitfor)
	struct inode *ip;	/* the "in_core" copy of the inode */
	struct buf *bp;		/* the buffer containing the inode block */
	int waitfor;		/* nonzero => update must be allowed */
{
	struct inodedep *inodedep;
	struct inoref *inoref;
	struct worklist *wk;
	struct mount *mp;
	struct buf *ibp;
	struct fs *fs;
	int error;

	mp = UFSTOVFS(ip->i_ump);
	fs = ip->i_fs;
	/*
	 * Preserve the freelink that is on disk.  clear_unlinked_inodedep()
	 * does not have access to the in-core ip so must write directly into
	 * the inode block buffer when setting freelink.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC)
		DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
	else
		DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
	/*
	 * If the effective link count is not equal to the actual link
	 * count, then we must track the difference in an inodedep while
	 * the inode is (potentially) tossed out of the cache.  Otherwise,
	 * if there is no existing inodedep, then there are no dependencies
	 * to track.
	 */
	ACQUIRE_LOCK(&lk);
again:
	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
		FREE_LOCK(&lk);
		if (ip->i_effnlink != ip->i_nlink)
			panic("softdep_update_inodeblock: bad link count");
		return;
	}
	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
		panic("softdep_update_inodeblock: bad delta");
	/*
	 * If we're flushing all dependencies we must also move any waiting
	 * for journal writes onto the bufwait list prior to I/O.
	 */
	if (waitfor) {
		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
			    == DEPCOMPLETE) {
				jwait(&inoref->if_list, MNT_WAIT);
				goto again;
			}
		}
	}
	/*
	 * Changes have been initiated.  Anything depending on these
	 * changes cannot occur until this inode has been written.
	 */
	inodedep->id_state &= ~COMPLETE;
	if ((inodedep->id_state & ONWORKLIST) == 0)
		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
	/*
	 * Any new dependencies associated with the incore inode must
	 * now be moved to the list associated with the buffer holding
	 * the in-memory copy of the inode.  Once merged process any
	 * allocdirects that are completed by the merger.
	 */
	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
	if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
		    NULL);
	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
	if (!TAILQ_EMPTY(&inodedep->id_extupdt))
		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
		    NULL);
	/*
	 * Now that the inode has been pushed into the buffer, the
	 * operations dependent on the inode being written to disk
	 * can be moved to the id_bufwait so that they will be
	 * processed when the buffer I/O completes.
	 */
	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
	}
	/*
	 * Newly allocated inodes cannot be written until the bitmap
	 * that allocates them has been written (indicated by
	 * DEPCOMPLETE being set in id_state).  If we are doing a
	 * forced sync (e.g., an fsync on a file), we force the bitmap
	 * to be written so that the update can be done.
	 */
	if (waitfor == 0) {
		FREE_LOCK(&lk);
		return;
	}
retry:
	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
		FREE_LOCK(&lk);
		return;
	}
	ibp = inodedep->id_bmsafemap->sm_buf;
	ibp = getdirtybuf(ibp, &lk, MNT_WAIT);
	if (ibp == NULL) {
		/*
		 * If ibp came back as NULL, the dependency could have been
		 * freed while we slept.  Look it up again, and check to see
		 * that it has completed.
		 */
		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
			goto retry;
		FREE_LOCK(&lk);
		return;
	}
	FREE_LOCK(&lk);
	if ((error = bwrite(ibp)) != 0)
		softdep_error("softdep_update_inodeblock: bwrite", error);
}
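
/*
 * Editor's example for merge_inode_lists() below (hedged): both lists
 * are kept sorted by ad_offset.  Merging new = {0, 2} into old = {1, 2}
 * yields old = {0, 1, 2}, with the two offset-2 entries combined by
 * allocdirect_merge() so that only the newest allocation for a given
 * file offset survives.
 */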

/*
 * Merge a new inode dependency list (such as id_newinoupdt) into an
 * old inode dependency list (such as id_inoupdt).  This routine must be
 * called with splbio interrupts blocked.
 */
static void
merge_inode_lists(newlisthead, oldlisthead)
	struct allocdirectlst *newlisthead;
	struct allocdirectlst *oldlisthead;
{
	struct allocdirect *listadp, *newadp;

	newadp = TAILQ_FIRST(newlisthead);
	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
		if (listadp->ad_offset < newadp->ad_offset) {
			listadp = TAILQ_NEXT(listadp, ad_next);
			continue;
		}
		TAILQ_REMOVE(newlisthead, newadp, ad_next);
		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
		if (listadp->ad_offset == newadp->ad_offset) {
			allocdirect_merge(oldlisthead, newadp,
			    listadp);
			listadp = newadp;
		}
		newadp = TAILQ_FIRST(newlisthead);
	}
	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
		TAILQ_REMOVE(newlisthead, newadp, ad_next);
		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
	}
}

/*
 * If we are doing an fsync, then we must ensure that any directory
 * entries for the inode have been written after the inode gets to disk.
 */
int
softdep_fsync(vp)
	struct vnode *vp;	/* the "in_core" copy of the inode */
{
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct inoref *inoref;
	struct worklist *wk;
	struct diradd *dap;
	struct mount *mp;
	struct vnode *pvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs;
	struct thread *td = curthread;
	int error, flushparent, pagedep_new_block;
	ino_t parentino;
	ufs_lbn_t lbn;

	ip = VTOI(vp);
	fs = ip->i_fs;
	mp = vp->v_mount;
	ACQUIRE_LOCK(&lk);
restart:
	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
		FREE_LOCK(&lk);
		return (0);
	}
	TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
		if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
		    == DEPCOMPLETE) {
			jwait(&inoref->if_list, MNT_WAIT);
			goto restart;
		}
	}
	if (!LIST_EMPTY(&inodedep->id_inowait) ||
	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
	    !TAILQ_EMPTY(&inodedep->id_newinoupdt))
		panic("softdep_fsync: pending ops %p", inodedep);
	for (error = 0, flushparent = 0; ; ) {
		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
			break;
		if (wk->wk_type != D_DIRADD)
			panic("softdep_fsync: Unexpected type %s",
			    TYPENAME(wk->wk_type));
		dap = WK_DIRADD(wk);
		/*
		 * Flush our parent if this directory entry has a MKDIR_PARENT
		 * dependency or is contained in a newly allocated block.
		 */
		if (dap->da_state & DIRCHG)
			pagedep = dap->da_previous->dm_pagedep;
		else
			pagedep = dap->da_pagedep;
		parentino = pagedep->pd_ino;
		lbn = pagedep->pd_lbn;
		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
			panic("softdep_fsync: dirty");
		if ((dap->da_state & MKDIR_PARENT) ||
		    (pagedep->pd_state & NEWBLOCK))
			flushparent = 1;
		else
			flushparent = 0;
		/*
		 * If we are being fsync'ed as part of vgone'ing this vnode,
		 * then we will not be able to release and recover the
		 * vnode below, so we just have to give up on writing its
		 * directory entry out.  It will eventually be written, just
		 * not now, but then the user was not asking to have it
		 * written, so we are not breaking any promises.
		 */
		if (vp->v_iflag & VI_DOOMED)
			break;
		/*
		 * We prevent deadlock by always fetching inodes from the
		 * root, moving down the directory tree.  Thus, when fetching
		 * our parent directory, we first try to get the lock.  If
		 * that fails, we must unlock ourselves before requesting
		 * the lock on our parent.  See the comment in ufs_lookup
		 * for details on possible races.
		 */
		FREE_LOCK(&lk);
		if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp,
		    FFSV_FORCEINSMQ)) {
			error = vfs_busy(mp, MBF_NOWAIT);
			if (error != 0) {
				vfs_ref(mp);
				VOP_UNLOCK(vp, 0);
				error = vfs_busy(mp, 0);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				vfs_rel(mp);
				if (error != 0)
					return (ENOENT);
				if (vp->v_iflag & VI_DOOMED) {
					vfs_unbusy(mp);
					return (ENOENT);
				}
			}
			VOP_UNLOCK(vp, 0);
			error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
			    &pvp, FFSV_FORCEINSMQ);
			vfs_unbusy(mp);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			if (vp->v_iflag & VI_DOOMED) {
				if (error == 0)
					vput(pvp);
				error = ENOENT;
			}
			if (error != 0)
				return (error);
		}
		/*
		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
		 * that are contained in direct blocks will be resolved by
		 * doing a ffs_update.  Pagedeps contained in indirect blocks
		 * may require a complete sync'ing of the directory.  So, we
		 * try the cheap and fast ffs_update first, and if that fails,
		 * then we do the slower ffs_syncvnode of the directory.
		 */
		if (flushparent) {
			int locked;

			if ((error = ffs_update(pvp, 1)) != 0) {
				vput(pvp);
				return (error);
			}
			ACQUIRE_LOCK(&lk);
			locked = 1;
			if (inodedep_lookup(mp, ip->i_number, 0,
			    &inodedep) != 0) {
				if ((wk = LIST_FIRST(
				    &inodedep->id_pendinghd)) != NULL) {
					if (wk->wk_type != D_DIRADD)
						panic("softdep_fsync: "
						    "Unexpected type %s",
						    TYPENAME(wk->wk_type));
					dap = WK_DIRADD(wk);
					if (dap->da_state & DIRCHG)
						pagedep =
						    dap->da_previous->dm_pagedep;
					else
						pagedep = dap->da_pagedep;
					pagedep_new_block =
					    pagedep->pd_state & NEWBLOCK;
					FREE_LOCK(&lk);
					locked = 0;
					if (pagedep_new_block && (error =
					    ffs_syncvnode(pvp, MNT_WAIT, 0))) {
						vput(pvp);
						return (error);
					}
				}
			}
			if (locked)
				FREE_LOCK(&lk);
		}
		/*
		 * Flush directory page containing the inode's name.
		 */
		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn),
		    td->td_ucred, &bp);
		if (error == 0)
			error = bwrite(bp);
		else
			brelse(bp);
		vput(pvp);
		if (error != 0)
			return (error);
		ACQUIRE_LOCK(&lk);
		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
			break;
	}
	FREE_LOCK(&lk);
	return (0);
}

/*
 * Flush all the dirty bitmaps associated with the block device
 * before flushing the rest of the dirty blocks so as to reduce
 * the number of dependencies that will have to be rolled back.
 *
 * XXX Unused?
 */
void
softdep_fsync_mountdev(vp)
	struct vnode *vp;
{
	struct buf *bp, *nbp;
	struct worklist *wk;
	struct bufobj *bo;

	if (!vn_isdisk(vp, NULL))
		panic("softdep_fsync_mountdev: vnode not a disk");
	bo = &vp->v_bufobj;
restart:
	BO_LOCK(bo);
	ACQUIRE_LOCK(&lk);
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * If it is already scheduled, skip to the next buffer.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;

		if ((bp->b_flags & B_DELWRI) == 0)
			panic("softdep_fsync_mountdev: not dirty");
		/*
		 * We are only interested in bitmaps with outstanding
		 * dependencies.
		 */
		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
		    wk->wk_type != D_BMSAFEMAP ||
		    (bp->b_vflags & BV_BKGRDINPROG)) {
			BUF_UNLOCK(bp);
			continue;
		}
		FREE_LOCK(&lk);
		BO_UNLOCK(bo);
		bremfree(bp);
		(void) bawrite(bp);
		goto restart;
	}
	FREE_LOCK(&lk);
	drain_output(vp);
	BO_UNLOCK(bo);
}

/*
 * Sync all cylinder groups that were dirty at the time this function is
 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
 * is used to flush freedep activity that may be holding up writes to an
 * indirect block.
 */
static int
sync_cgs(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct bmsafemap *bmsafemap;
	struct bmsafemap *sentinel;
	struct ufsmount *ump;
	struct buf *bp;
	int error;

	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
	sentinel->sm_cg = -1;
	ump = VFSTOUFS(mp);
	error = 0;
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
		/* Skip sentinels and cgs with no work to release. */
		if (bmsafemap->sm_cg == -1 ||
		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
		     LIST_EMPTY(&bmsafemap->sm_freewr))) {
			LIST_REMOVE(sentinel, sm_next);
			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
			continue;
		}
		/*
		 * If we don't get the lock and we're waiting try again, if
		 * not move on to the next buf and try to sync it.
		 */
		bp = getdirtybuf(bmsafemap->sm_buf, &lk, waitfor);
		if (bp == NULL && waitfor == MNT_WAIT)
			continue;
		LIST_REMOVE(sentinel, sm_next);
		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
		if (bp == NULL)
			continue;
		FREE_LOCK(&lk);
		if (waitfor == MNT_NOWAIT)
			bawrite(bp);
		else
			error = bwrite(bp);
		ACQUIRE_LOCK(&lk);
		if (error)
			break;
	}
	LIST_REMOVE(sentinel, sm_next);
	FREE_LOCK(&lk);
	free(sentinel, M_BMSAFEMAP);
	return (error);
}
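
/*
 * Editor's note: sync_cgs() above uses a sentinel element so the
 * dirty-cg list can be traversed while lk is dropped around each
 * write.  A minimal sketch of the pattern, under the assumptions
 * visible in the code:
 *
 *	LIST_INSERT_HEAD(list, sentinel, link);
 *	while ((elm = LIST_NEXT(sentinel, link)) != NULL) {
 *		LIST_REMOVE(sentinel, link);
 *		LIST_INSERT_AFTER(elm, sentinel, link);
 *		... unlock, write elm's buffer, relock ...
 *	}
 *	LIST_REMOVE(sentinel, link);
 *
 * Because the sentinel marks our position, elements dirtied during the
 * pass land ahead of it and are not revisited.
 */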

/*
 * This routine is called when we are trying to synchronously flush a
 * file.  This routine must eliminate any filesystem metadata dependencies
 * so that the syncing routine can succeed.
 */
int
softdep_sync_metadata(struct vnode *vp)
{
	int error;

	/*
	 * Ensure that any direct block dependencies have been cleared,
	 * truncations are started, and inode references are journaled.
	 */
	ACQUIRE_LOCK(&lk);
	/*
	 * Write all journal records to prevent rollbacks on devvp.
	 */
	if (vp->v_type == VCHR)
		softdep_flushjournal(vp->v_mount);
	error = flush_inodedep_deps(vp, vp->v_mount, VTOI(vp)->i_number);
	/*
	 * Ensure that all truncates are written so we won't find deps on
	 * indirect blocks.
	 */
	process_truncates(vp);
	FREE_LOCK(&lk);

	return (error);
}

/*
 * This routine is called when we are attempting to sync a buf with
 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
 * other IO it can but returns EBUSY if the buffer is not yet able to
 * be written.  If the buffer's dependencies will not cause rollbacks,
 * 0 is always returned.
 */
int
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
{
	struct indirdep *indirdep;
	struct pagedep *pagedep;
	struct allocindir *aip;
	struct newblk *newblk;
	struct buf *nbp;
	struct worklist *wk;
	int i, error;

	/*
	 * For VCHR we just don't want to force flush any dependencies that
	 * will cause rollbacks.
	 */
	if (vp->v_type == VCHR) {
		if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
			return (EBUSY);
		return (0);
	}
	ACQUIRE_LOCK(&lk);
	/*
	 * As we hold the buffer locked, none of its dependencies
	 * will disappear.
	 */
	error = 0;
top:
	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
		switch (wk->wk_type) {

		case D_ALLOCDIRECT:
		case D_ALLOCINDIR:
			newblk = WK_NEWBLK(wk);
			if (newblk->nb_jnewblk != NULL) {
				if (waitfor == MNT_NOWAIT) {
					error = EBUSY;
					goto out_unlock;
				}
				jwait(&newblk->nb_jnewblk->jn_list, waitfor);
				goto top;
			}
			if (newblk->nb_state & DEPCOMPLETE ||
			    waitfor == MNT_NOWAIT)
				continue;
			nbp = newblk->nb_bmsafemap->sm_buf;
			nbp = getdirtybuf(nbp, &lk, waitfor);
			if (nbp == NULL)
				goto top;
			FREE_LOCK(&lk);
			if ((error = bwrite(nbp)) != 0)
				goto out;
			ACQUIRE_LOCK(&lk);
			continue;

		case D_INDIRDEP:
			indirdep = WK_INDIRDEP(wk);
			if (waitfor == MNT_NOWAIT) {
				if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
				    !LIST_EMPTY(&indirdep->ir_deplisthd)) {
					error = EBUSY;
					goto out_unlock;
				}
			}
			if (!TAILQ_EMPTY(&indirdep->ir_trunc))
				panic("softdep_sync_buf: truncation pending.");
		restart:
			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
				newblk = (struct newblk *)aip;
				if (newblk->nb_jnewblk != NULL) {
					jwait(&newblk->nb_jnewblk->jn_list,
					    waitfor);
					goto restart;
				}
				if (newblk->nb_state & DEPCOMPLETE)
					continue;
				nbp = newblk->nb_bmsafemap->sm_buf;
				nbp = getdirtybuf(nbp, &lk, waitfor);
				if (nbp == NULL)
					goto restart;
				FREE_LOCK(&lk);
				if ((error = bwrite(nbp)) != 0)
					goto out;
				ACQUIRE_LOCK(&lk);
				goto restart;
			}
			continue;

		case D_PAGEDEP:
			/*
			 * Only flush directory entries in synchronous passes.
			 */
			if (waitfor != MNT_WAIT) {
				error = EBUSY;
				goto out_unlock;
			}
			/*
			 * While syncing snapshots, we must allow recursive
			 * lookups.
			 */
			BUF_AREC(bp);
			/*
			 * We are trying to sync a directory that may
			 * have dependencies on both its own metadata
			 * and/or dependencies on the inodes of any
			 * recently allocated files.  We walk its diradd
			 * lists pushing out the associated inode.
			 */
			pagedep = WK_PAGEDEP(wk);
			for (i = 0; i < DAHASHSZ; i++) {
				if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
					continue;
				if ((error = flush_pagedep_deps(vp, wk->wk_mp,
				    &pagedep->pd_diraddhd[i]))) {
					BUF_NOREC(bp);
					goto out_unlock;
				}
			}
			BUF_NOREC(bp);
			continue;

		case D_FREEWORK:
		case D_FREEDEP:
		case D_JSEGDEP:
		case D_JNEWBLK:
			continue;

		default:
			panic("softdep_sync_buf: Unknown type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
out_unlock:
	FREE_LOCK(&lk);
out:
	return (error);
}

/*
 * Flush the dependencies associated with an inodedep.
 * Called with splbio blocked.
 */
static int
flush_inodedep_deps(vp, mp, ino)
	struct vnode *vp;
	struct mount *mp;
	ino_t ino;
{
	struct inodedep *inodedep;
	struct inoref *inoref;
	int error, waitfor;

	/*
	 * This work is done in two passes.  The first pass grabs most
	 * of the buffers and begins asynchronously writing them.  The
	 * only way to wait for these asynchronous writes is to sleep
	 * on the filesystem vnode which may stay busy for a long time
	 * if the filesystem is active.  So, instead, we make a second
	 * pass over the dependencies blocking on each write.  In the
	 * usual case we will be blocking against a write that we
	 * initiated, so when it is done the dependency will have been
	 * resolved.  Thus the second pass is expected to end quickly.
	 * We give a brief window at the top of the loop to allow
	 * any pending I/O to complete.
	 */
	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
		if (error)
			return (error);
		FREE_LOCK(&lk);
		ACQUIRE_LOCK(&lk);
	restart:
		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
			return (0);
		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
			    == DEPCOMPLETE) {
				jwait(&inoref->if_list, MNT_WAIT);
				goto restart;
			}
		}
		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
			continue;
		/*
		 * If this is the second (blocking) pass, we are done;
		 * otherwise switch to the second pass.
		 */
		if (waitfor == MNT_WAIT)
			break;
		waitfor = MNT_WAIT;
	}
	/*
	 * Try freeing inodedep in case all dependencies have been removed.
	 */
	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
		(void) free_inodedep(inodedep);
	return (0);
}
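
/*
 * Editor's sketch of the two-pass idiom used above (hedged): the
 * caller-visible effect is
 *
 *	pass 1 (MNT_NOWAIT):  start asynchronous writes on every
 *			      dependent buffer that can be grabbed;
 *	pass 2 (MNT_WAIT):    block on whatever remains, which is
 *			      usually just the writes pass 1 began.
 *
 * flush_deplist() below returns nonzero whenever it initiated or
 * waited on a write, causing the scan to restart from a consistent
 * state.
 */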

/*
 * Flush an inode dependency list.
 * Called with splbio blocked.
 */
static int
flush_deplist(listhead, waitfor, errorp)
	struct allocdirectlst *listhead;
	int waitfor;
	int *errorp;
{
	struct allocdirect *adp;
	struct newblk *newblk;
	struct buf *bp;

	mtx_assert(&lk, MA_OWNED);
	TAILQ_FOREACH(adp, listhead, ad_next) {
		newblk = (struct newblk *)adp;
		if (newblk->nb_jnewblk != NULL) {
			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
			return (1);
		}
		if (newblk->nb_state & DEPCOMPLETE)
			continue;
		bp = newblk->nb_bmsafemap->sm_buf;
		bp = getdirtybuf(bp, &lk, waitfor);
		if (bp == NULL) {
			if (waitfor == MNT_NOWAIT)
				continue;
			return (1);
		}
		FREE_LOCK(&lk);
		if (waitfor == MNT_NOWAIT)
			bawrite(bp);
		else
			*errorp = bwrite(bp);
		ACQUIRE_LOCK(&lk);
		return (1);
	}
	return (0);
}

/*
 * Flush dependencies associated with an allocdirect block.
 */
static int
flush_newblk_dep(vp, mp, lbn)
	struct vnode *vp;
	struct mount *mp;
	ufs_lbn_t lbn;
{
	struct newblk *newblk;
	struct bufobj *bo;
	struct inode *ip;
	struct buf *bp;
	ufs2_daddr_t blkno;
	int error;

	error = 0;
	bo = &vp->v_bufobj;
	ip = VTOI(vp);
	blkno = DIP(ip, i_db[lbn]);
	if (blkno == 0)
		panic("flush_newblk_dep: Missing block");
	ACQUIRE_LOCK(&lk);
	/*
	 * Loop until all dependencies related to this block are satisfied.
	 * We must be careful to restart after each sleep in case a write
	 * completes some part of this process for us.
	 */
	for (;;) {
		if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
			FREE_LOCK(&lk);
			break;
		}
		if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
			panic("flush_newblk_deps: Bad newblk %p", newblk);
		/*
		 * Flush the journal.
		 */
		if (newblk->nb_jnewblk != NULL) {
			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
			continue;
		}
		/*
		 * Write the bitmap dependency.
		 */
		if ((newblk->nb_state & DEPCOMPLETE) == 0) {
			bp = newblk->nb_bmsafemap->sm_buf;
			bp = getdirtybuf(bp, &lk, MNT_WAIT);
			if (bp == NULL)
				continue;
			FREE_LOCK(&lk);
			error = bwrite(bp);
			if (error)
				break;
			ACQUIRE_LOCK(&lk);
			continue;
		}
		/*
		 * Write the buffer.
		 */
		FREE_LOCK(&lk);
		BO_LOCK(bo);
		bp = gbincore(bo, lbn);
		if (bp != NULL) {
			error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
			    LK_INTERLOCK, BO_MTX(bo));
			if (error == ENOLCK) {
				ACQUIRE_LOCK(&lk);
				continue;	/* Slept, retry */
			}
			if (error != 0)
				break;	/* Failed */
			if (bp->b_flags & B_DELWRI) {
				bremfree(bp);
				error = bwrite(bp);
				if (error)
					break;
			} else
				BUF_UNLOCK(bp);
		} else
			BO_UNLOCK(bo);
		/*
		 * We have to wait for the direct pointers to
		 * point at the newdirblk before the dependency
		 * will go away.
		 */
		error = ffs_update(vp, 1);
		if (error)
			break;
		ACQUIRE_LOCK(&lk);
	}
	return (error);
}

/*
 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
 * Called with splbio blocked.
 */
 */
static int
flush_pagedep_deps(pvp, mp, diraddhdp)
	struct vnode *pvp;
	struct mount *mp;
	struct diraddhd *diraddhdp;
{
	struct inodedep *inodedep;
	struct inoref *inoref;
	struct ufsmount *ump;
	struct diradd *dap;
	struct vnode *vp;
	int error = 0;
	struct buf *bp;
	ino_t inum;

	ump = VFSTOUFS(mp);
restart:
	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
		/*
		 * Flush ourselves if this directory entry
		 * has a MKDIR_PARENT dependency.
		 */
		if (dap->da_state & MKDIR_PARENT) {
			FREE_LOCK(&lk);
			if ((error = ffs_update(pvp, 1)) != 0)
				break;
			ACQUIRE_LOCK(&lk);
			/*
			 * If that cleared dependencies, go on to next.
			 */
			if (dap != LIST_FIRST(diraddhdp))
				continue;
			if (dap->da_state & MKDIR_PARENT)
				panic("flush_pagedep_deps: MKDIR_PARENT");
		}
		/*
		 * A newly allocated directory must have its "." and
		 * ".." entries written out before its name can be
		 * committed in its parent.
		 */
		inum = dap->da_newinum;
		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
			panic("flush_pagedep_deps: lost inode1");
		/*
		 * Wait for any pending journal adds to complete so we don't
		 * cause rollbacks while syncing.
		 */
		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
			    == DEPCOMPLETE) {
				jwait(&inoref->if_list, MNT_WAIT);
				goto restart;
			}
		}
		if (dap->da_state & MKDIR_BODY) {
			FREE_LOCK(&lk);
			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
			    FFSV_FORCEINSMQ)))
				break;
			error = flush_newblk_dep(vp, mp, 0);
			/*
			 * If we still have the dependency we might need to
			 * update the vnode to sync the new link count to
			 * disk.
			 */
			if (error == 0 && dap == LIST_FIRST(diraddhdp))
				error = ffs_update(vp, 1);
			vput(vp);
			if (error != 0)
				break;
			ACQUIRE_LOCK(&lk);
			/*
			 * If that cleared dependencies, go on to next.
			 */
			if (dap != LIST_FIRST(diraddhdp))
				continue;
			if (dap->da_state & MKDIR_BODY) {
				inodedep_lookup(UFSTOVFS(ump), inum, 0,
				    &inodedep);
				panic("flush_pagedep_deps: MKDIR_BODY "
				    "inodedep %p dap %p vp %p",
				    inodedep, dap, vp);
			}
		}
		/*
		 * Flush the inode on which the directory entry depends.
		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
		 * the only remaining dependency is that the updated inode
		 * count must get pushed to disk. The inode has already
		 * been pushed into its inode buffer (via VOP_UPDATE) at
		 * the time of the reference count change. So we need only
		 * locate that buffer, ensure that there will be no rollback
		 * caused by a bitmap dependency, then write the inode buffer.
		 */
retry:
		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
			panic("flush_pagedep_deps: lost inode");
		/*
		 * If the inode still has bitmap dependencies,
		 * push them to disk.
		 */
		if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
			bp = inodedep->id_bmsafemap->sm_buf;
			bp = getdirtybuf(bp, &lk, MNT_WAIT);
			if (bp == NULL)
				goto retry;
			FREE_LOCK(&lk);
			if ((error = bwrite(bp)) != 0)
				break;
			ACQUIRE_LOCK(&lk);
			if (dap != LIST_FIRST(diraddhdp))
				continue;
		}
		/*
		 * If the inode is still sitting in a buffer waiting
		 * to be written or waiting for the link count to be
		 * adjusted, update it here to flush it to disk.
		 */
		if (dap == LIST_FIRST(diraddhdp)) {
			FREE_LOCK(&lk);
			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
			    FFSV_FORCEINSMQ)))
				break;
			error = ffs_update(vp, 1);
			vput(vp);
			if (error)
				break;
			ACQUIRE_LOCK(&lk);
		}
		/*
		 * If we have failed to get rid of all the dependencies
		 * then something is seriously wrong.
		 */
		if (dap == LIST_FIRST(diraddhdp)) {
			inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
			panic("flush_pagedep_deps: failed to flush "
			    "inodedep %p ino %ju dap %p",
			    inodedep, (uintmax_t)inum, dap);
		}
	}
	if (error)
		ACQUIRE_LOCK(&lk);
	return (error);
}

/*
 * A large burst of file addition or deletion activity can drive the
 * memory load excessively high. First attempt to slow things down
 * using the techniques below. If that fails, this routine requests
 * the offending operations to fall back to running synchronously
 * until the memory load returns to a reasonable level.
 */
int
softdep_slowdown(vp)
	struct vnode *vp;
{
	struct ufsmount *ump;
	int jlow;
	int max_softdeps_hard;

	ACQUIRE_LOCK(&lk);
	jlow = 0;
	/*
	 * Check for journal space if needed.
	 */
	if (DOINGSUJ(vp)) {
		ump = VFSTOUFS(vp->v_mount);
		if (journal_space(ump, 0) == 0)
			jlow = 1;
	}
	max_softdeps_hard = max_softdeps * 11 / 10;
	if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
	    dep_current[D_INODEDEP] < max_softdeps_hard &&
	    VFSTOUFS(vp->v_mount)->um_numindirdeps < maxindirdeps &&
	    dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0) {
		FREE_LOCK(&lk);
		return (0);
	}
	if (VFSTOUFS(vp->v_mount)->um_numindirdeps >= maxindirdeps || jlow)
		softdep_speedup();
	stat_sync_limit_hit += 1;
	FREE_LOCK(&lk);
	if (DOINGSUJ(vp))
		return (0);
	return (1);
}

/*
 * Called by the allocation routines when they are about to fail
 * in the hope that we can free up the requested resource (inodes
 * or disk space).
 *
 * First check to see if the work list has anything on it. If it has,
 * clean up entries until we successfully free the requested resource.
 * Because this process holds inodes locked, we cannot handle any remove
 * requests that might block on a locked inode as that could lead to
 * deadlock. If the worklist yields none of the requested resource,
 * start syncing out vnodes to free up the needed space.
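 *
 * The return value is advisory: 1 means cleanup was attempted and the
 * caller may reasonably retry its allocation, 0 means the request could
 * not be processed.  A sketch of the expected caller pattern in an
 * allocator (illustrative only; note that the UFS mount lock is held
 * across the call):
 */
#if 0
retry:
	/* ... allocation has just failed for lack of blocks ... */
	if (softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT) != 0)
		goto retry;
	return (ENOSPC);
#endif
/*
 * Snapshot vnodes and copy-on-write contexts are filtered out below to
 * avoid deadlocks and incoherent snapshots.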
 */
int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *lvp, *mvp;
	long starttime;
	ufs2_daddr_t needed;
	int error;

	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to process any
	 * worklist items as we will recurse into the copyonwrite
	 * routine. This will result in an incoherent snapshot.
	 * If the vnode that we hold is a snapshot, we must avoid
	 * handling other resources that could cause deadlock.
	 */
	if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
		return (0);

	if (resource == FLUSH_BLOCKS_WAIT)
		stat_cleanup_blkrequests += 1;
	else
		stat_cleanup_inorequests += 1;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	UFS_UNLOCK(ump);
	error = ffs_update(vp, 1);
	if (error != 0) {
		UFS_LOCK(ump);
		return (0);
	}
	/*
	 * If we are in need of resources, consider pausing for
	 * tickdelay to give ourselves some breathing room.
	 */
	ACQUIRE_LOCK(&lk);
	process_removes(vp);
	process_truncates(vp);
	request_cleanup(UFSTOVFS(ump), resource);
	FREE_LOCK(&lk);
	/*
	 * Now clean up at least as many resources as we will need.
	 *
	 * When requested to clean up inodes, the number that are needed
	 * is set by the number of simultaneous writers (mnt_writeopcount)
	 * plus a bit of slop (2) in case some more writers show up while
	 * we are cleaning.
	 *
	 * When requested to free up space, the amount of space that
	 * we need is enough blocks to allocate a full-sized segment
	 * (fs_contigsumsize). The number of such segments that will
	 * be needed is set by the number of simultaneous writers
	 * (mnt_writeopcount) plus a bit of slop (2) in case some more
	 * writers show up while we are cleaning.
	 *
	 * Additionally, if we are unprivileged and allocating space,
	 * we need to ensure that we clean up enough blocks to get the
	 * needed number of blocks over the threshold of the minimum
	 * number of blocks required to be kept free by the filesystem
	 * (fs_minfree).
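	 *
	 * Worked example (illustrative numbers only): with mnt_writeopcount
	 * of 4, a FLUSH_INODES_WAIT request computes needed = 4 + 2 = 6
	 * inodes, while a FLUSH_BLOCKS_WAIT request on a filesystem with
	 * fs_contigsumsize of 16 computes needed = (4 + 2) * 16 = 96 blocks,
	 * plus, for an unprivileged requester, whatever is required to bring
	 * the free count back over the fs_minfree reserve.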
	 */
	if (resource == FLUSH_INODES_WAIT) {
		needed = vp->v_mount->mnt_writeopcount + 2;
	} else if (resource == FLUSH_BLOCKS_WAIT) {
		needed = (vp->v_mount->mnt_writeopcount + 2) *
		    fs->fs_contigsumsize;
		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
			needed += fragstoblks(fs,
			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
			    fs->fs_cstotal.cs_nffree, fs->fs_frag));
	} else {
		UFS_LOCK(ump);
		printf("softdep_request_cleanup: Unknown resource type %d\n",
		    resource);
		return (0);
	}
	starttime = time_second;
retry:
	if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
	    fs->fs_cstotal.cs_nbfree <= needed) ||
	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
	    fs->fs_cstotal.cs_nifree <= needed)) {
		ACQUIRE_LOCK(&lk);
		if (ump->softdep_on_worklist > 0 &&
		    process_worklist_item(UFSTOVFS(ump),
		    ump->softdep_on_worklist, LK_NOWAIT) != 0)
			stat_worklist_push += 1;
		FREE_LOCK(&lk);
	}
	/*
	 * If we still need resources and there are no more worklist
	 * entries to process to obtain them, we have to start flushing
	 * the dirty vnodes to force the release of additional requests
	 * to the worklist that we can then process to reap additional
	 * resources. We walk the vnodes associated with the mount point
	 * until we get the needed worklist requests that we can reap.
	 */
	if ((resource == FLUSH_BLOCKS_WAIT &&
	    fs->fs_cstotal.cs_nbfree <= needed) ||
	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
	    fs->fs_cstotal.cs_nifree <= needed)) {
		MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
			if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
				VI_UNLOCK(lvp);
				continue;
			}
			if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
			    curthread))
				continue;
			if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
				vput(lvp);
				continue;
			}
			(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
			vput(lvp);
		}
		lvp = ump->um_devvp;
		if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
			VOP_UNLOCK(lvp, 0);
		}
		if (ump->softdep_on_worklist > 0) {
			stat_cleanup_retries += 1;
			goto retry;
		}
		stat_cleanup_failures += 1;
	}
	if (time_second - starttime > stat_cleanup_high_delay)
		stat_cleanup_high_delay = time_second - starttime;
	UFS_LOCK(ump);
	return (1);
}

/*
 * If memory utilization has gotten too high, deliberately slow things
 * down and speed up the I/O processing.
 */
extern struct thread *syncertd;
static int
request_cleanup(mp, resource)
	struct mount *mp;
	int resource;
{
	struct thread *td = curthread;
	struct ufsmount *ump;

	mtx_assert(&lk, MA_OWNED);
	/*
	 * We never hold up the filesystem syncer or buf daemon.
	 */
	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
		return (0);
	ump = VFSTOUFS(mp);
	/*
	 * First check to see if the work list has gotten backlogged.
	 * If it has, co-opt this process to help clean up two entries.
	 * Because this process may hold inodes locked, we cannot
	 * handle any remove requests that might block on a locked
	 * inode as that could lead to deadlock. We set TDP_SOFTDEP
	 * to avoid recursively processing the worklist.
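	 *
	 * For example (illustrative numbers only): with max_softdeps of
	 * 10000, a worklist backlog of more than 1000 items triggers the
	 * co-opting below, and the two items processed on the caller's
	 * behalf are counted in stat_worklist_push.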
	 */
	if (ump->softdep_on_worklist > max_softdeps / 10) {
		td->td_pflags |= TDP_SOFTDEP;
		process_worklist_item(mp, 2, LK_NOWAIT);
		td->td_pflags &= ~TDP_SOFTDEP;
		stat_worklist_push += 2;
		return (1);
	}
	/*
	 * Next, we attempt to speed up the syncer process. If that
	 * is successful, then we allow the process to continue.
	 */
	if (softdep_speedup() &&
	    resource != FLUSH_BLOCKS_WAIT &&
	    resource != FLUSH_INODES_WAIT)
		return (0);
	/*
	 * If we are resource constrained on inode dependencies, try
	 * flushing some dirty inodes. Otherwise, we are constrained
	 * by file deletions, so try accelerating flushes of directories
	 * with removal dependencies. We would like to do the cleanup
	 * here, but we probably hold an inode locked at this point and
	 * that might deadlock against one that we try to clean. So,
	 * the best that we can do is request the syncer daemon to do
	 * the cleanup for us.
	 */
	switch (resource) {

	case FLUSH_INODES:
	case FLUSH_INODES_WAIT:
		stat_ino_limit_push += 1;
		req_clear_inodedeps += 1;
		stat_countp = &stat_ino_limit_hit;
		break;

	case FLUSH_BLOCKS:
	case FLUSH_BLOCKS_WAIT:
		stat_blk_limit_push += 1;
		req_clear_remove += 1;
		stat_countp = &stat_blk_limit_hit;
		break;

	default:
		panic("request_cleanup: unknown type");
	}
	/*
	 * Hopefully the syncer daemon will catch up and awaken us.
	 * We wait at most tickdelay before proceeding in any case.
	 */
	proc_waiting += 1;
	if (callout_pending(&softdep_callout) == FALSE)
		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
		    pause_timer, 0);

	msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
	proc_waiting -= 1;
	return (1);
}

/*
 * Awaken processes pausing in request_cleanup and clear proc_waiting
 * to indicate that there is no longer a timer running.
 */
static void
pause_timer(arg)
	void *arg;
{

	/*
	 * The callout_ API has acquired mtx and will hold it around this
	 * function call.
	 */
	*stat_countp += 1;
	wakeup_one(&proc_waiting);
	if (proc_waiting > 0)
		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
		    pause_timer, 0);
}

/*
 * Flush out a directory with at least one removal dependency in an effort to
 * reduce the number of dirrem, freefile, and freeblks dependency structures.
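 *
 * This routine and clear_inodedeps() below run on behalf of the worklist
 * processor, which dispatches the requests queued by request_cleanup()
 * above roughly as follows (a sketch only; the exact bookkeeping lives
 * with the worklist processing code):
 */
#if 0
	if (req_clear_inodedeps) {
		clear_inodedeps();
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove();
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
#endif
/*
 * clear_remove() below flushes at most one victim directory per call and
 * then returns, so a single request makes bounded progress.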
 */
static void
clear_remove(void)
{
	struct pagedep_hashhead *pagedephd;
	struct pagedep *pagedep;
	static int next = 0;
	struct mount *mp;
	struct vnode *vp;
	struct bufobj *bo;
	int error, cnt;
	ino_t ino;

	mtx_assert(&lk, MA_OWNED);

	for (cnt = 0; cnt < pagedep_hash; cnt++) {
		pagedephd = &pagedep_hashtbl[next++];
		if (next >= pagedep_hash)
			next = 0;
		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
			if (LIST_EMPTY(&pagedep->pd_dirremhd))
				continue;
			mp = pagedep->pd_list.wk_mp;
			ino = pagedep->pd_ino;
			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
				continue;
			FREE_LOCK(&lk);

			/*
			 * Let unmount clear deps
			 */
			error = vfs_busy(mp, MBF_NOWAIT);
			if (error != 0)
				goto finish_write;
			error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
			    FFSV_FORCEINSMQ);
			vfs_unbusy(mp);
			if (error != 0) {
				softdep_error("clear_remove: vget", error);
				goto finish_write;
			}
			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
				softdep_error("clear_remove: fsync", error);
			bo = &vp->v_bufobj;
			BO_LOCK(bo);
			drain_output(vp);
			BO_UNLOCK(bo);
			vput(vp);
		finish_write:
			vn_finished_write(mp);
			ACQUIRE_LOCK(&lk);
			return;
		}
	}
}

/*
 * Clear out a block of dirty inodes in an effort to reduce
 * the number of inodedep dependency structures.
 */
static void
clear_inodedeps(void)
{
	struct inodedep_hashhead *inodedephd;
	struct inodedep *inodedep;
	static int next = 0;
	struct mount *mp;
	struct vnode *vp;
	struct fs *fs;
	int error, cnt;
	ino_t firstino, lastino, ino;

	mtx_assert(&lk, MA_OWNED);
	/*
	 * Pick a random inode dependency to be cleared.
	 * We will then gather up all the inodes in its block
	 * that have dependencies and flush them out.
	 */
	for (cnt = 0; cnt < inodedep_hash; cnt++) {
		inodedephd = &inodedep_hashtbl[next++];
		if (next >= inodedep_hash)
			next = 0;
		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
			break;
	}
	if (inodedep == NULL)
		return;
	fs = inodedep->id_fs;
	mp = inodedep->id_list.wk_mp;
	/*
	 * Find the last inode in the block with dependencies.
	 */
	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
			break;
	/*
	 * Asynchronously push all but the last inode with dependencies.
	 * Synchronously push the last inode with dependencies to ensure
	 * that the inode block gets written to free up the inodedeps.
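	 *
	 * For example (illustrative numbers only): with INOPB(fs) of 64
	 * inodes per block, an inodedep for inode 200 yields
	 * firstino = 200 & ~63 = 192, and the backward scan for lastino
	 * starts at 192 + 63 = 255, so all inodes 192..255 sharing that
	 * inode block are considered.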
	 */
	for (ino = firstino; ino <= lastino; ino++) {
		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
			continue;
		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
			continue;
		FREE_LOCK(&lk);
		error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
		if (error != 0) {
			vn_finished_write(mp);
			ACQUIRE_LOCK(&lk);
			return;
		}
		if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
		    FFSV_FORCEINSMQ)) != 0) {
			softdep_error("clear_inodedeps: vget", error);
			vfs_unbusy(mp);
			vn_finished_write(mp);
			ACQUIRE_LOCK(&lk);
			return;
		}
		vfs_unbusy(mp);
		if (ino == lastino) {
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)))
				softdep_error("clear_inodedeps: fsync1", error);
		} else {
			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
				softdep_error("clear_inodedeps: fsync2", error);
			BO_LOCK(&vp->v_bufobj);
			drain_output(vp);
			BO_UNLOCK(&vp->v_bufobj);
		}
		vput(vp);
		vn_finished_write(mp);
		ACQUIRE_LOCK(&lk);
	}
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{
	struct worklist *wk;

	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(wkhd)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&bp->b_dep, wk);
	}
	FREE_LOCK(&lk);
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{
	struct buf *bp;
	struct fs *fs;
	int error;

	fs = ip->i_fs;
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, cred, &bp);
	if (error) {
		softdep_freework(wkhd);
		return;
	}
	softdep_buf_append(bp, wkhd);
	bqrelse(bp);
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	ACQUIRE_LOCK(&lk);
	handle_jwork(wkhd);
	FREE_LOCK(&lk);
}

/*
 * Function to determine if the buffer has outstanding dependencies
 * that will cause a roll-back if the buffer is written. If wantcount
 * is set, return number of dependencies, otherwise just yes or no.
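 *
 * This routine is reached via the bioops io_countdeps hook (see
 * buf_countdeps()).  A minimal sketch of how a buffer flusher can
 * consult it (illustrative only, not part of the original code):
 */
#if 0
	if (buf_countdeps(bp, 0) != 0) {
		/*
		 * Writing bp now would trigger a roll-back; skip it and
		 * let the dependency machinery write it later.
		 */
		BUF_UNLOCK(bp);
		continue;
	}
#endif
/*
 * With wantcount of 0 the scan below stops at the first dependency
 * found, so the boolean form is cheap.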
 */
static int
softdep_count_dependencies(bp, wantcount)
	struct buf *bp;
	int wantcount;
{
	struct worklist *wk;
	struct bmsafemap *bmsafemap;
	struct freework *freework;
	struct inodedep *inodedep;
	struct indirdep *indirdep;
	struct freeblks *freeblks;
	struct allocindir *aip;
	struct pagedep *pagedep;
	struct dirrem *dirrem;
	struct newblk *newblk;
	struct mkdir *mkdir;
	struct diradd *dap;
	int i, retval;

	retval = 0;
	ACQUIRE_LOCK(&lk);
	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
		switch (wk->wk_type) {

		case D_INODEDEP:
			inodedep = WK_INODEDEP(wk);
			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
				/* bitmap allocation dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
				/* direct block pointer dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
				/* direct block pointer dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
				/* Add reference dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_INDIRDEP:
			indirdep = WK_INDIRDEP(wk);

			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
				/* indirect truncation dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}

			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
				/* indirect block pointer dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_PAGEDEP:
			pagedep = WK_PAGEDEP(wk);
			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
					/* Journal remove ref dependency. */
					retval += 1;
					if (!wantcount)
						goto out;
				}
			}
			for (i = 0; i < DAHASHSZ; i++) {
				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i],
				    da_pdlist) {
					/* directory entry dependency */
					retval += 1;
					if (!wantcount)
						goto out;
				}
			}
			continue;

		case D_BMSAFEMAP:
			bmsafemap = WK_BMSAFEMAP(wk);
			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
				/* Add reference dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
				/* Allocate block dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_FREEBLKS:
			freeblks = WK_FREEBLKS(wk);
			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
				/* Freeblk journal dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_ALLOCDIRECT:
		case D_ALLOCINDIR:
			newblk = WK_NEWBLK(wk);
			if (newblk->nb_jnewblk) {
				/* Journal allocate dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_MKDIR:
			mkdir = WK_MKDIR(wk);
			if (mkdir->md_jaddref) {
				/* Journal reference dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_FREEWORK:
		case D_FREEDEP:
		case D_JSEGDEP:
		case D_JSEG:
		case D_SBDEP:
			/* never a dependency on these blocks */
			continue;

		default:
			panic("softdep_count_dependencies: Unexpected type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
out:
	FREE_LOCK(&lk);
	return (retval);
}

/*
 * Acquire exclusive access to a buffer.
 * Must be called with a locked mtx parameter.
 * Return acquired buffer or NULL on failure.
 */
static struct buf *
getdirtybuf(bp, mtx, waitfor)
	struct buf *bp;
	struct mtx *mtx;
	int waitfor;
{
	int error;

	mtx_assert(mtx, MA_OWNED);
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
		if (waitfor != MNT_WAIT)
			return (NULL);
		error = BUF_LOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, mtx);
		/*
		 * Even if we successfully acquire bp here, we have dropped
		 * mtx, which may violate our guarantee.
		 */
		if (error == 0)
			BUF_UNLOCK(bp);
		else if (error != ENOLCK)
			panic("getdirtybuf: inconsistent lock: %d", error);
		mtx_lock(mtx);
		return (NULL);
	}
	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
		if (mtx == &lk && waitfor == MNT_WAIT) {
			mtx_unlock(mtx);
			BO_LOCK(bp->b_bufobj);
			BUF_UNLOCK(bp);
			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
				bp->b_vflags |= BV_BKGRDWAIT;
				msleep(&bp->b_xflags, BO_MTX(bp->b_bufobj),
				    PRIBIO | PDROP, "getbuf", 0);
			} else
				BO_UNLOCK(bp->b_bufobj);
			mtx_lock(mtx);
			return (NULL);
		}
		BUF_UNLOCK(bp);
		if (waitfor != MNT_WAIT)
			return (NULL);
		/*
		 * The mtx argument must be bp->b_vp's mutex in
		 * this case.
		 */
#ifdef DEBUG_VFS_LOCKS
		if (bp->b_vp->v_type != VCHR)
			ASSERT_BO_LOCKED(bp->b_bufobj);
#endif
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, mtx, PRIBIO, "getbuf", 0);
		return (NULL);
	}
	if ((bp->b_flags & B_DELWRI) == 0) {
		BUF_UNLOCK(bp);
		return (NULL);
	}
	bremfree(bp);
	return (bp);
}

/*
 * Check if it is safe to suspend the file system now. On entry,
 * the vnode interlock for devvp should be held. Return 0 with
 * the mount interlock held if the file system can be suspended now,
 * otherwise return EAGAIN with the mount interlock held.
 */
int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_deps,
		      int softdep_accdeps,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	struct ufsmount *ump;
	int error;

	ump = VFSTOUFS(mp);
	bo = &devvp->v_bufobj;
	ASSERT_BO_LOCKED(bo);

	for (;;) {
		if (!TRY_ACQUIRE_LOCK(&lk)) {
			BO_UNLOCK(bo);
			ACQUIRE_LOCK(&lk);
			FREE_LOCK(&lk);
			BO_LOCK(bo);
			continue;
		}
		MNT_ILOCK(mp);
		if (mp->mnt_secondary_writes != 0) {
			FREE_LOCK(&lk);
			BO_UNLOCK(bo);
			msleep(&mp->mnt_secondary_writes,
			    MNT_MTX(mp),
			    (PUSER - 1) | PDROP, "secwr", 0);
			BO_LOCK(bo);
			continue;
		}
		break;
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Softdep activity occurred after start of vnode sync loop.
	 * - Secondary writes occurred after start of vnode sync loop.
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    softdep_deps != 0 ||
	    ump->softdep_deps != 0 ||
	    softdep_accdeps != ump->softdep_accdeps ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	FREE_LOCK(&lk);
	BO_UNLOCK(bo);
	return (error);
}

/*
 * Get the number of dependency structures for the file system, both
 * the current number and the total number allocated. These will
 * later be used to detect that softdep processing has occurred.
 */
void
softdep_get_depcounts(struct mount *mp,
		      int *softdep_depsp,
		      int *softdep_accdepsp)
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(&lk);
	*softdep_depsp = ump->softdep_deps;
	*softdep_accdepsp = ump->softdep_accdeps;
	FREE_LOCK(&lk);
}

/*
 * Wait for pending output on a vnode to complete.
 * Must be called with vnode lock and interlock locked.
 *
 * XXX: Should just be a call to bufobj_wwait().
 */
static void
drain_output(vp)
	struct vnode *vp;
{
	struct bufobj *bo;

	bo = &vp->v_bufobj;
	ASSERT_VOP_LOCKED(vp, "drain_output");
	ASSERT_BO_LOCKED(bo);

	while (bo->bo_numoutput) {
		bo->bo_flag |= BO_WWAIT;
		msleep((caddr_t)&bo->bo_numoutput,
		    BO_MTX(bo), PRIBIO + 1, "drainvp", 0);
	}
}

/*
 * Called whenever a buffer that is being invalidated or reallocated
 * contains dependencies. This should only happen if an I/O error has
 * occurred. The routine is called with the buffer locked.
 */
static void
softdep_deallocate_dependencies(bp)
	struct buf *bp;
{

	if ((bp->b_ioflags & BIO_ERROR) == 0)
		panic("softdep_deallocate_dependencies: dangling deps");
	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname,
		    bp->b_error);
	else
		printf("softdep_deallocate_dependencies: "
		    "got error %d while accessing filesystem\n", bp->b_error);
	if (bp->b_error != ENXIO)
		panic("softdep_deallocate_dependencies: unrecovered I/O error");
}

/*
 * Function to handle asynchronous write errors in the filesystem.
 */
static void
softdep_error(func, error)
	char *func;
	int error;
{

	/*
	 * XXX should do something better!
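	 *
	 * One possible improvement (a sketch only, not part of the
	 * original code) would be to rate-limit the console traffic
	 * with ppsratecheck(9):
	 */
#if 0
	static struct timeval lasterr;
	static int curerr;

	if (ppsratecheck(&lasterr, &curerr, 1))
		printf("%s: got error %d while accessing filesystem\n",
		    func, error);
#endif
	/*
	 * For now the message is printed unconditionally.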
	 */
	printf("%s: got error %d while accessing filesystem\n", func, error);
}

#ifdef DDB

static void
inodedep_print(struct inodedep *inodedep, int verbose)
{
	db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
	    " saveino %p\n",
	    inodedep, inodedep->id_fs, inodedep->id_state,
	    (intmax_t)inodedep->id_ino,
	    (intmax_t)fsbtodb(inodedep->id_fs,
	    ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
	    inodedep->id_nlinkdelta, inodedep->id_savednlink,
	    inodedep->id_savedino1);

	if (verbose == 0)
		return;

	db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, "
	    "mkdiradd %p\n",
	    LIST_FIRST(&inodedep->id_pendinghd),
	    LIST_FIRST(&inodedep->id_bufwait),
	    LIST_FIRST(&inodedep->id_inowait),
	    TAILQ_FIRST(&inodedep->id_inoreflst),
	    inodedep->id_mkdiradd);
	db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n",
	    TAILQ_FIRST(&inodedep->id_inoupdt),
	    TAILQ_FIRST(&inodedep->id_newinoupdt),
	    TAILQ_FIRST(&inodedep->id_extupdt),
	    TAILQ_FIRST(&inodedep->id_newextupdt));
}

DB_SHOW_COMMAND(inodedep, db_show_inodedep)
{

	if (have_addr == 0) {
		db_printf("Address required\n");
		return;
	}
	inodedep_print((struct inodedep*)addr, 1);
}

DB_SHOW_COMMAND(inodedeps, db_show_inodedeps)
{
	struct inodedep_hashhead *inodedephd;
	struct inodedep *inodedep;
	struct fs *fs;
	int cnt;

	fs = have_addr ? (struct fs *)addr : NULL;
	for (cnt = 0; cnt < inodedep_hash; cnt++) {
		inodedephd = &inodedep_hashtbl[cnt];
		LIST_FOREACH(inodedep, inodedephd, id_hash) {
			if (fs != NULL && fs != inodedep->id_fs)
				continue;
			inodedep_print(inodedep, 0);
		}
	}
}

DB_SHOW_COMMAND(worklist, db_show_worklist)
{
	struct worklist *wk;

	if (have_addr == 0) {
		db_printf("Address required\n");
		return;
	}
	wk = (struct worklist *)addr;
	db_printf("worklist: %p type %s state 0x%X\n",
	    wk, TYPENAME(wk->wk_type), wk->wk_state);
}

DB_SHOW_COMMAND(workhead, db_show_workhead)
{
	struct workhead *wkhd;
	struct worklist *wk;
	int i;

	if (have_addr == 0) {
		db_printf("Address required\n");
		return;
	}
	wkhd = (struct workhead *)addr;
	wk = LIST_FIRST(wkhd);
	for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
		db_printf("worklist: %p type %s state 0x%X\n",
		    wk, TYPENAME(wk->wk_type), wk->wk_state);
	if (i == 100)
		db_printf("workhead overflow\n");
}

DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
{
	struct jaddref *jaddref;
	struct diradd *diradd;
	struct mkdir *mkdir;

	LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) {
		diradd = mkdir->md_diradd;
		db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
		    mkdir, mkdir->md_state, diradd, diradd->da_state);
		if ((jaddref = mkdir->md_jaddref) != NULL)
			db_printf(" jaddref %p jaddref state 0x%X",
			    jaddref, jaddref->ja_state);
		db_printf("\n");
	}
}

#endif /* DDB */

#endif /* SOFTUPDATES */